From 4f0358b4efddbf67bac927b87e77548738a58d54 Mon Sep 17 00:00:00 2001 From: japelsin Date: Mon, 26 Feb 2024 18:01:41 +0100 Subject: [PATCH] feat(search): add model flag --- README.md | 14 ++++++++------ cmd/search.go | 13 +++++++------ 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index ccff2d0..5fdf064 100644 --- a/README.md +++ b/README.md @@ -38,12 +38,14 @@ Usage: pplx search [flags] Flags: - -m, --max_tokens int Maximum number of tokens to be used per request. Defaults to config value. (default 1000) - -f, --frequency_penalty int How much to penalize token frequency. - -p, --presence_penalty int How much to penalize token presence. Between -2 and 2. - -t, --temperature int The amount of randomness in the response. Between 0 and 2. - -K, --top_k int Number of tokens to consider when generating tokens. Between 0 and 2048. - -P, --top_p int Nucleus sampling. Probability cutoff for token selection. Between 0 and 1. + --frequency_penalty int How much to penalize token frequency. + --max_tokens int Maximum number of tokens to be used per request. Defaults to config value. (default 1000) + --model string Model to use. Defaults to config value. (default "sonar-small-online") + --presence_penalty int How much to penalize token presence. Between -2 and 2. + --temperature int The amount of randomness in the response. Between 0 and 2. + --top_k int Number of tokens to consider when generating tokens. Between 0 and 2048. + --top_p int Nucleus sampling. Probability cutoff for token selection. Between 0 and 1. + ``` The API reference can be found [here](https://docs.perplexity.ai/reference/post_chat_completions). diff --git a/cmd/search.go b/cmd/search.go index e0ef892..1181677 100644 --- a/cmd/search.go +++ b/cmd/search.go @@ -127,10 +127,11 @@ var searchCmd = &cobra.Command{ func init() { rootCmd.AddCommand(searchCmd) - searchCmd.Flags().IntP(utils.MaxTokensKey, "m", 1000, "Maximum number of tokens to be used per request. 
Defaults to config value.") - searchCmd.Flags().IntP(utils.TemperatureKey, "t", 0, "The amount of randomness in the response. Between 0 and 2.") - searchCmd.Flags().IntP(utils.TopKKey, "K", 0, "Number of tokens to consider when generating tokens. Between 0 and 2048.") - searchCmd.Flags().IntP(utils.TopPKey, "P", 0, "Nucleus sampling. Probability cutoff for token selection. Between 0 and 1.") - searchCmd.Flags().IntP(utils.FrequencyPenaltyKey, "f", 0, "How much to penalize token frequency.") - searchCmd.Flags().IntP(utils.PresencePenaltyKey, "p", 0, "How much to penalize token presence. Between -2 and 2.") + searchCmd.Flags().Int(utils.MaxTokensKey, 1000, "Maximum number of tokens to be used per request. Defaults to config value.") + searchCmd.Flags().Int(utils.TemperatureKey, 0, "The amount of randomness in the response. Between 0 and 2.") + searchCmd.Flags().Int(utils.TopKKey, 0, "Number of tokens to consider when generating tokens. Between 0 and 2048.") + searchCmd.Flags().Int(utils.TopPKey, 0, "Nucleus sampling. Probability cutoff for token selection. Between 0 and 1.") + searchCmd.Flags().Int(utils.FrequencyPenaltyKey, 0, "How much to penalize token frequency.") + searchCmd.Flags().Int(utils.PresencePenaltyKey, 0, "How much to penalize token presence. Between -2 and 2.") + searchCmd.Flags().String(utils.ModelKey, "sonar-small-online", "Model to use. Defaults to config value.") }