diff --git a/include/Types.hpp b/include/Types.hpp index e895aeae..aa632f66 100644 --- a/include/Types.hpp +++ b/include/Types.hpp @@ -51,8 +51,8 @@ enum DataType { MLLM_TYPE_COUNT, }; enum ChlType { - BHSD = 0, - BSHD = 1, + BSHD = 0, + BHSD = 1, }; enum Chl { diff --git a/src/Tensor.hpp b/src/Tensor.hpp index 62fd8e00..e2124426 100644 --- a/src/Tensor.hpp +++ b/src/Tensor.hpp @@ -158,14 +158,14 @@ class Tensor { // inline int offset(const int n, const int c = 0, const int h = 0, // const int w = 0) const { // CHECK_GE(n, 0); - // CHECK_LE(n, num()); - // CHECK_GE(channels(), 0); - // CHECK_LE(c, channels()); - // CHECK_GE(height(), 0); - // CHECK_LE(h, height()); - // CHECK_GE(width(), 0); - // CHECK_LE(w, width()); - // return ((n * channels() + c) * height() + h) * width() + w; + // CHECK_LE(n, batch()); + // CHECK_GE(head(), 0); + // CHECK_LE(c, head()); + // CHECK_GE(sequence(), 0); + // CHECK_LE(h, sequence()); + // CHECK_GE(dimension(), 0); + // CHECK_LE(w, dimension()); + // return ((n * head() + c) * sequence() + h) * dimension() + w; // } inline int offset(const int b, const int h = 0, const int s = 0, const int d = 0) const { @@ -536,6 +536,7 @@ class Tensor { // TODO:Name? + template <typename Dtype> void fullData(Dtype value) { for (int n = 0; n < batch(); ++n) { diff --git a/src/backends/cpu/CPUKVCache.hpp b/src/backends/cpu/CPUKVCache.hpp index 3f911d54..380acc4b 100644 --- a/src/backends/cpu/CPUKVCache.hpp +++ b/src/backends/cpu/CPUKVCache.hpp @@ -22,7 +22,8 @@ class CPUKVCache final : public Op { private: bool support_multi_thread_ = false; - int cache_seq_len_= -INFINITY; + + int cache_seq_len_= -999; bool isK_; int cache_limit_ ;