
Commit

fix: Fill in input Tensor
yirongjie committed Mar 26, 2024
1 parent f9c9c3f commit e6fd6e0
Showing 3 changed files with 16 additions and 13 deletions.
16 changes: 8 additions & 8 deletions README.md
@@ -58,14 +58,14 @@ Wait.. why on-device multimodal LLM? - It's a key building block for [intelligen

| | FP32 | INT4 |
|-----------------------------------------------------------------------------|-----|------|
-| [LLaMA-1/2 7B](https://github.com/facebookresearch/llama) | ✔️ | ✔️ |
-| [Alpaca 7B](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2) | ✔️ | ✔️ |
-| [TinyLLaMA 1.1B](https://github.com/jzhang38/TinyLlama) | ✔️ | ✔️ |
-| [Fuyu 8B](https://www.adept.ai/blog/fuyu-8b) | ✔️ | ✔️ |
-| [Vision Transformer](https://github.com/google-research/vision_transformer) | ✔️ | ✔️ |
-| [CLIP](https://github.com/openai/CLIP) | ✔️ | ✔️ |
-| [ImageBind](https://github.com/facebookresearch/ImageBind) (3 modalities) | ✔️ | ✔️ |
-| [LLaVA 7B](https://github.com/haotian-liu/LLaVA) | ✔️ | ✔️ |
+| [LLaMA-1/2 7B](https://github.com/facebookresearch/llama) | [✔️](https://huggingface.co/mllmTeam/llama-2-7b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/llama-2-7b-mllm/tree/main) |
+| [Alpaca 7B](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2) | [✔️](https://huggingface.co/mllmTeam/chinese-alpaca-7b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/chinese-alpaca-7b-mllm/tree/main) |
+| [TinyLLaMA 1.1B](https://github.com/jzhang38/TinyLlama) | [✔️](https://huggingface.co/mllmTeam/tinyllama-1.1b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/tinyllama-1.1b-mllm/tree/main) |
+| [Fuyu 8B](https://www.adept.ai/blog/fuyu-8b) | [✔️](https://huggingface.co/mllmTeam/fuyu-8b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/fuyu-8b-mllm/tree/main) |
+| [Vision Transformer](https://github.com/google-research/vision_transformer) | [✔️](https://huggingface.co/mllmTeam/vit-base-patch16-224-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/vit-base-patch16-224-mllm/tree/main) |
+| [CLIP](https://github.com/openai/CLIP) | [✔️](https://huggingface.co/mllmTeam/clip-vit-base-patch32-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/clip-vit-base-patch32-mllm/tree/main) |
+| [ImageBind](https://github.com/facebookresearch/ImageBind) (3 modalities) | [✔️](https://huggingface.co/mllmTeam/imagebind_huge-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/imagebind_huge-mllm/tree/main) |
+| [LLaVA 7B](https://github.com/haotian-liu/LLaVA) | [✔️](https://huggingface.co/mllmTeam/llava-1.5-7b-mllm/tree/main) | [✔️](https://huggingface.co/mllmTeam/llava-1.5-7b-mllm/tree/main) |

## Quick Start

10 changes: 5 additions & 5 deletions src/Layer.hpp
@@ -178,11 +178,11 @@ class Layer {
}
switch (input0.status()) {
case TENSOR_STATIC_INIT: {
-if (Tensor::gph_.find(input0.name()) == Tensor::gph_.end()) {
+if (Tensor::gph_.find(input0.name()) == Tensor::gph_.end() || input0.count() != Tensor::gph_[input0.name()].count()) {
Tensor::gph_[input0.name()] = input0;
Tensor::gph_[input0.name()].setName(input0.name());
}
-if (Tensor::gph_.find(input1.name()) == Tensor::gph_.end()) {
+if (Tensor::gph_.find(input1.name()) == Tensor::gph_.end() || input1.count() != Tensor::gph_[input1.name()].count()) {
Tensor::gph_[input1.name()] = input1;
Tensor::gph_[input1.name()].setName(input1.name());
}
@@ -248,15 +248,15 @@
}
switch (input0.status()) {
case TENSOR_STATIC_INIT: {
-if (Tensor::gph_.find(input0.name()) == Tensor::gph_.end()) {
+if (Tensor::gph_.find(input0.name()) == Tensor::gph_.end() || input0.count() != Tensor::gph_[input0.name()].count()) {
Tensor::gph_[input0.name()] = input0;
Tensor::gph_[input0.name()].setName(input0.name());
}
-if (Tensor::gph_.find(input1.name()) == Tensor::gph_.end()) {
+if (Tensor::gph_.find(input1.name()) == Tensor::gph_.end() || input1.count() != Tensor::gph_[input1.name()].count()) {
Tensor::gph_[input1.name()] = input1;
Tensor::gph_[input1.name()].setName(input1.name());
}
-if (Tensor::gph_.find(input2.name()) == Tensor::gph_.end()) {
+if (Tensor::gph_.find(input2.name()) == Tensor::gph_.end() || input2.count() != Tensor::gph_[input2.name()].count()) {
Tensor::gph_[input2.name()] = input2;
Tensor::gph_[input2.name()].setName(input2.name());
}
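Taken together, the hunks above tighten the graph-cache guard in both Layer call paths: an entry in `Tensor::gph_` is now replaced not only when the input's name is missing from the cache, but also when the incoming tensor's element count no longer matches the cached copy. Below is a minimal standalone sketch of that insert-or-refresh pattern; `SimpleTensor`, `gph`, and `registerInput` are illustrative names for this sketch, not mllm's actual API.

```cpp
#include <map>
#include <string>

// Minimal stand-in for a tensor: only a name and an element count matter here.
// (Illustrative only; mllm's real Tensor also carries shape, dtype, backend, etc.)
struct SimpleTensor {
    std::string name;
    int count = 0; // total number of elements
};

// Static graph cache keyed by tensor name, mirroring the role of Tensor::gph_.
static std::map<std::string, SimpleTensor> gph;

// Insert-or-refresh: the cached entry is replaced not only when the name is
// missing, but also when the incoming tensor's element count differs from the
// cached one (e.g. an input re-filled with a different sequence length).
void registerInput(const SimpleTensor &input) {
    auto it = gph.find(input.name);
    if (it == gph.end() || input.count != it->second.count) {
        gph[input.name] = input;
    }
}
```

In the diff this guard runs once per input tensor (input0, input1, input2) when the layer is entered in the TENSOR_STATIC_INIT state.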
3 changes: 3 additions & 0 deletions src/Module.hpp
@@ -78,6 +78,9 @@ class Module {
for (auto &input : inputs) {
input.setTtype(TensorType::NORMAL_TENSOR);
input.status() = TENSOR_STATIC_INIT;
+if (input.batch() == 0) {
+    Tensor::gph_[input.name()] = input;
+}
}
tensor_status = TENSOR_STATIC_INIT;

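The added branch pre-registers certain inputs into the graph cache before the static-init pass. Below is a self-contained sketch of that idea, again using illustrative stand-in types rather than mllm's real Tensor/Module API, and assuming that `batch() == 0` marks an input that has been declared but not yet filled with data:

```cpp
#include <map>
#include <string>
#include <vector>

// Illustrative stand-in (not mllm's real API): a tensor with a name and a batch
// dimension; batch == 0 is taken here to mean "declared but not yet filled".
struct NamedTensor {
    std::string name;
    int batch = 0;
};

static std::map<std::string, NamedTensor> gph; // graph cache keyed by name

// Sketch of the added behaviour: before running the static graph, any input
// whose batch is still zero is registered in the cache under its name, so the
// layer-side lookups shown above find an entry they can later fill in.
void prepareInputs(std::vector<NamedTensor> &inputs) {
    for (auto &input : inputs) {
        if (input.batch == 0) {
            gph[input.name] = input;
        }
    }
}
```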
