diff --git a/server.cpp b/server.cpp
index ba0d98a..0a67032 100644
--- a/server.cpp
+++ b/server.cpp
@@ -129,7 +129,7 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         uint32_t magic;
         fin.read((char *) &magic, sizeof(magic));
         if (magic != 0x67676d6c) {
-            fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
+            fprintf(stderr, "%s: invalid model file '%s' (bad magic (0x%x))\n", __func__, fname.c_str(),magic);
             return false;
         }
     }
diff --git a/utils.cpp b/utils.cpp
index 20847e5..cae5766 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -80,8 +80,8 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
     fprintf(stderr, "\n");
     fprintf(stderr, "options:\n");
     fprintf(stderr, "  -h, --help show this help message and exit\n");
-    fprintf(stderr, "  -sp, --server-port change the default 8080 port\n");
-    fprintf(stderr, "  -sa, --server-address change the default 0.0.0.0 address\n");
+    fprintf(stderr, "  -sp, --server-port N change the default 8080 port\n");
+    fprintf(stderr, "  -sa, --server-address IP change the default 0.0.0.0 address\n");
     fprintf(stderr, "  -r PROMPT, --reverse-prompt PROMPT\n");
     fprintf(stderr, "  in interactive mode, poll user input upon seeing PROMPT\n");
     fprintf(stderr, "  --color colorise output to distinguish prompt and user input from generations\n");