Commit 955d86d
Assorted small cleanups.
PiperOrigin-RevId: 720517452
danielkeysers authored and copybara-github committed Jan 28, 2025
1 parent a248f76 commit 955d86d
Showing 4 changed files with 5 additions and 7 deletions.
2 changes: 1 addition & 1 deletion compression/compress.h
@@ -629,7 +629,7 @@ class ReadFromBlobStore {
     // reader_ invalid or any Enqueue failed
     if (err_ != 0) return err_;
     // Setup the model_memory.
-    for (int b = 0; b < model_toc_.size(); ++b) {
+    for (size_t b = 0; b < model_toc_.size(); ++b) {
       const std::string& file_key = file_keys_[b];
       MatPtr* blob = model_toc_[b];
       if (!file_toc_.Empty()) {
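Why int → size_t: model_toc_.size() returns size_t, so an int loop index draws a signed/unsigned comparison warning (-Wsign-compare) and would misbehave for negative values. A minimal sketch of the same pattern; the Walk function below is hypothetical, not from this repo:

    #include <cstddef>
    #include <vector>

    void Walk(const std::vector<int>& items) {
      // items.size() returns size_t (unsigned). An `int` index would be
      // promoted to unsigned in the comparison, which warns under
      // -Wsign-compare and wraps around if the index were ever negative.
      for (size_t i = 0; i < items.size(); ++i) {
        (void)items[i];  // ... use items[i] ...
      }
    }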
6 changes: 3 additions & 3 deletions compression/python/compression_clif_aux.cc
@@ -139,8 +139,9 @@ class SbsWriterImpl : public WriterInterface {
   void AddTokenizer(const std::string& tokenizer_path) override {
     Path path(tokenizer_path);
     GemmaTokenizer tokenizer(path);
-    tokenizer_proto_ = tokenizer.Serialize();
-    compressor_.AddTokenizer(tokenizer_proto_);
+    std::string tokenizer_proto = tokenizer.Serialize();
+    HWY_ASSERT(!tokenizer_proto.empty());
+    compressor_.AddTokenizer(tokenizer_proto);
   }
 
   // Returns the number of blobs added.
@@ -159,7 +160,6 @@ class SbsWriterImpl : public WriterInterface {
   std::vector<MatStorage> model_memory_;
   std::vector<float> scales_;
   CompressorMode mode_;
-  std::string tokenizer_proto_;
 };
 
 WriterInterface* NewSbsWriter(CompressorMode mode) {
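The second hunk can drop the tokenizer_proto_ member because the serialized proto is only needed for the duration of the AddTokenizer call; this assumes compressor_.AddTokenizer keeps its own copy of the bytes. A sketch of that ownership pattern under the same assumption; the Sink type below is hypothetical, not from this repo:

    #include <string>
    #include <utility>

    // Hypothetical sink that stores its own copy of the bytes, so the
    // caller's string only needs to outlive the call itself.
    class Sink {
     public:
      void Add(std::string bytes) { stored_ = std::move(bytes); }

     private:
      std::string stored_;
    };

    void AddSerialized(Sink& sink) {
      std::string proto = "serialized bytes";  // stand-in for Serialize()
      // A local suffices: once Add() returns, the sink owns its copy and
      // no member variable has to keep the data alive.
      sink.Add(std::move(proto));
    }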
2 changes: 0 additions & 2 deletions gemma/gemma-inl.h
@@ -690,7 +690,6 @@ HWY_NOINLINE void FFWNoVit(Activations& activations, size_t num_interleaved,
   PROFILER_ZONE("Gen.FFW");
   const size_t model_dim = layer_weights->layer_config.model_dim;
   const size_t ffh_hidden_dim = layer_weights->layer_config.ff_hidden_dim;
-  using WeightType = T;
   HWY_DASSERT(num_interleaved <= activations.bf_pre_ffw_rms_out.BatchSize());
 
   const bool add_bias = layer_weights->layer_config.ff_biases;
@@ -746,7 +745,6 @@ HWY_NOINLINE void FFWVit(Activations& activations, size_t num_interleaved,
                          const LayerWeightsPtrs<T>* layer_weights) {
   PROFILER_ZONE("Gen.FFW");
   const size_t ff_hidden_dim = layer_weights->layer_config.ff_hidden_dim;
-  using WeightType = typename LayerWeightsPtrs<T>::WeightF32OrBF16;
   HWY_DASSERT(num_interleaved <= activations.bf_pre_ffw_rms_out.BatchSize());
 
   const bool add_bias = layer_weights->layer_config.ff_biases;
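Both removed `using WeightType = ...` lines were dead: the aliases were never referenced in the function bodies, and compilers flag this under -Wall (GCC's -Wunused-local-typedefs; Clang has an equivalent warning). A minimal illustration, not taken from this repo:

    #include <cstddef>

    // Compile with -Wall: GCC/Clang flag unused local type aliases,
    // just like the two lines removed above.
    void Process(const float* data, size_t n) {
      using WeightType = float;  // warning: unused type alias 'WeightType'
      float sum = 0.0f;
      for (size_t i = 0; i < n; ++i) sum += data[i];
      (void)sum;  // silence unused-variable, keep the example compiling
    }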
2 changes: 1 addition & 1 deletion gemma/weights.cc
@@ -130,7 +130,7 @@ BlobError ModelWeightsStorage::Save(const std::string& tokenizer,
   writer.AddTokenizer(tokenizer);
   int err = writer.WriteAll(weights, &config_);
   if (err != 0) {
-    fprintf(stderr, "Failed to load model weights: %d\n", err);
+    fprintf(stderr, "Failed to write model weights: %d\n", err);
     return err;
   }
   return 0;
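The message fix aligns the text with what Save actually does: WriteAll writes blobs and returns a nonzero BlobError on failure, so the failure here is a write failure, not a load failure. A rough sketch of the convention with stand-in types (not the real gemma.cpp declarations):

    #include <cstdio>

    using BlobError = int;              // stand-in; 0 means success
    BlobError WriteAllStub() { return 0; }  // pretend writer.WriteAll(...)

    BlobError Save() {
      const BlobError err = WriteAllStub();
      if (err != 0) {
        // Name the operation that actually failed: this is the write
        // path, which is what this commit's message fix corrects.
        fprintf(stderr, "Failed to write model weights: %d\n", err);
        return err;
      }
      return 0;
    }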
