From 0b44c4eeb4d559c38a7163e67cb8b9a66b3adb88 Mon Sep 17 00:00:00 2001 From: ngjaying Date: Tue, 23 Jul 2024 16:04:48 +0800 Subject: [PATCH 1/3] feat: restore redis io (#3034) Signed-off-by: Jiyong Huang --- internal/binder/io/ext_redis.go | 29 ++ internal/io/redis/lookup.go | 141 +++++++++ internal/io/redis/lookup_test.go | 136 +++++++++ internal/io/redis/redisPub.go | 113 ++++++++ internal/io/redis/redisPub_test.go | 97 +++++++ internal/io/redis/redisSub.go | 117 ++++++++ internal/io/redis/redisSub_test.go | 93 ++++++ internal/io/redis/redis_test.go | 82 ++++++ internal/io/redis/sink.go | 205 +++++++++++++ internal/io/redis/sink_test.go | 449 +++++++++++++++++++++++++++++ test/run_jmeter.sh | 4 +- 11 files changed, 1464 insertions(+), 2 deletions(-) create mode 100644 internal/binder/io/ext_redis.go create mode 100644 internal/io/redis/lookup.go create mode 100644 internal/io/redis/lookup_test.go create mode 100644 internal/io/redis/redisPub.go create mode 100644 internal/io/redis/redisPub_test.go create mode 100644 internal/io/redis/redisSub.go create mode 100644 internal/io/redis/redisSub_test.go create mode 100644 internal/io/redis/redis_test.go create mode 100644 internal/io/redis/sink.go create mode 100644 internal/io/redis/sink_test.go diff --git a/internal/binder/io/ext_redis.go b/internal/binder/io/ext_redis.go new file mode 100644 index 0000000000..cff8e2fd04 --- /dev/null +++ b/internal/binder/io/ext_redis.go @@ -0,0 +1,29 @@ +// Copyright 2021-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build redisdb || !core + +package io + +import ( + "github.com/lf-edge/ekuiper/v2/internal/io/redis" + "github.com/lf-edge/ekuiper/v2/pkg/modules" +) + +func init() { + modules.RegisterLookupSource("redis", redis.GetLookupSource) + modules.RegisterSink("redis", redis.GetSink) + modules.RegisterSink("redisPub", redis.RedisPub) + modules.RegisterSource("redisSub", redis.RedisSub) +} diff --git a/internal/io/redis/lookup.go b/internal/io/redis/lookup.go new file mode 100644 index 0000000000..02b30a2b08 --- /dev/null +++ b/internal/io/redis/lookup.go @@ -0,0 +1,141 @@ +// Copyright 2022-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + + "github.com/redis/go-redis/v9" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/pkg/cast" +) + +type conf struct { + // host:port address. 
+ Addr string `json:"addr,omitempty"` + Username string `json:"username,omitempty"` + // Optional password. Must match the password specified in the + Password string `json:"password,omitempty"` + DataType string `json:"dataType,omitempty"` + DB string `json:"datasource,omitempty"` +} + +type lookupSource struct { + c *conf + db int + cli *redis.Client +} + +func (s *lookupSource) Provision(ctx api.StreamContext, props map[string]any) error { + return s.Validate(props) +} + +func (s *lookupSource) Connect(ctx api.StreamContext) error { + logger := ctx.GetLogger() + logger.Debug("Opening redis lookup source") + + s.cli = redis.NewClient(&redis.Options{ + Addr: s.c.Addr, + Username: s.c.Username, + Password: s.c.Password, + DB: s.db, // use default DB + }) + _, err := s.cli.Ping(ctx).Result() + return err +} + +func (s *lookupSource) Lookup(ctx api.StreamContext, _ []string, keys []string, values []any) ([]map[string]any, error) { + ctx.GetLogger().Debugf("Lookup redis %v", keys) + if len(keys) != 1 { + return nil, fmt.Errorf("redis lookup only support one key, but got %v", keys) + } + v := fmt.Sprintf("%v", values[0]) + if s.c.DataType == "string" { + res, err := s.cli.Get(ctx, v).Result() + if err != nil { + if err == redis.Nil { + return []map[string]any{}, nil + } + return nil, err + } + m := make(map[string]any) + err = json.Unmarshal(cast.StringToBytes(res), &m) + if err != nil { + return nil, err + } + return []map[string]any{m}, nil + } else { + res, err := s.cli.LRange(ctx, v, 0, -1).Result() + if err != nil { + if err == redis.Nil { + return []map[string]any{}, nil + } + return nil, err + } + ret := make([]map[string]any, 0, len(res)) + for _, r := range res { + m := make(map[string]any) + err = json.Unmarshal(cast.StringToBytes(r), &m) + if err != nil { + return nil, err + } + ret = append(ret, m) + } + return ret, nil + } +} + +func (s *lookupSource) Validate(props map[string]any) error { + cfg := &conf{} + err := cast.MapToStruct(props, cfg) + if err != nil { + return err + } + if cfg.Addr == "" { + return errors.New("redis addr is null") + } + if cfg.DataType != "string" && cfg.DataType != "list" { + return errors.New("redis dataType must be string or list") + } + s.db, err = strconv.Atoi(cfg.DB) + if err != nil { + return fmt.Errorf("datasource %s is invalid", cfg.DB) + } + if s.db < 0 || s.db > 15 { + return fmt.Errorf("redis lookup source db should be in range 0-15") + } + s.c = cfg + return nil +} + +func (s *lookupSource) Open(ctx api.StreamContext) error { + ctx.GetLogger().Infof("Opening redis lookup source with conf %v", s.c) + return nil +} + +func (s *lookupSource) Close(ctx api.StreamContext) error { + ctx.GetLogger().Infof("Closing redis lookup source") + return s.cli.Close() +} + +func GetLookupSource() api.Source { + return &lookupSource{} +} + +var _ api.LookupSource = &lookupSource{} diff --git a/internal/io/redis/lookup_test.go b/internal/io/redis/lookup_test.go new file mode 100644 index 0000000000..bd6067656c --- /dev/null +++ b/internal/io/redis/lookup_test.go @@ -0,0 +1,136 @@ +// Copyright 2022-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "fmt" + "testing" + + "github.com/alicebob/miniredis/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/lf-edge/ekuiper/contract/v2/api" + mockContext "github.com/lf-edge/ekuiper/v2/pkg/mock/context" +) + +func init() { + s, err := miniredis.Run() + if err != nil { + panic(err) + } + addr = "localhost:" + s.Port() + // Mock id key data + s.Set("1", `{"id":1,"name":"John","address":34,"mobile":"334433"}`) + s.Set("2", `{"id":2,"name":"Susan","address":22,"mobile":"666433"}`) + // Mock group key list data + s.Lpush("group1", `{"id":1,"name":"John"}`) + s.Lpush("group1", `{"id":2,"name":"Susan"}`) + s.Lpush("group2", `{"id":3,"name":"Nancy"}`) + s.Lpush("group3", `{"id":4,"name":"Tom"}`) + mr = s +} + +// TestSingle test lookup value of a single map +func TestSingle(t *testing.T) { + ctx := mockContext.NewMockContext("test", "tt") + ls := GetLookupSource() + err := ls.Provision(ctx, map[string]any{"addr": addr, "datatype": "string", "datasource": "0"}) + if err != nil { + t.Error(err) + return + } + err = ls.Connect(ctx) + if err != nil { + t.Error(err) + return + } + tests := []struct { + value int + result []map[string]any + }{ + { + value: 1, + result: []map[string]any{ + {"id": float64(1), "name": "John", "address": float64(34), "mobile": "334433"}, + }, + }, { + value: 2, + result: []map[string]any{ + {"id": float64(2), "name": "Susan", "address": float64(22), "mobile": "666433"}, + }, + }, { + value: 3, + result: []map[string]any{}, + }, + } + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + actual, err := ls.(api.LookupSource).Lookup(ctx, []string{}, []string{"id"}, []any{tt.value}) + assert.NoError(t, err) + assert.Equal(t, tt.result, actual) + }) + } +} + +func TestList(t *testing.T) { + ctx := mockContext.NewMockContext("test", "tt") + ls := GetLookupSource() + err := ls.Provision(ctx, map[string]any{"addr": addr, "datatype": "list", "datasource": "0"}) + if err != nil { + t.Error(err) + return + } + err = ls.Connect(ctx) + if err != nil { + t.Error(err) + return + } + tests := []struct { + value string + result []map[string]any + }{ + { + value: "group1", + result: []map[string]any{ + {"id": float64(2), "name": "Susan"}, + {"id": float64(1), "name": "John"}, + }, + }, { + value: "group2", + result: []map[string]any{ + {"id": float64(3), "name": "Nancy"}, + }, + }, { + value: "group4", + result: []map[string]any{}, + }, + } + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + actual, err := ls.(api.LookupSource).Lookup(ctx, []string{}, []string{"id"}, []any{tt.value}) + assert.NoError(t, err) + assert.Equal(t, tt.result, actual) + }) + } +} + +func TestLookupSourceDB(t *testing.T) { + ctx := mockContext.NewMockContext("test", "tt") + s := &lookupSource{} + err := s.Provision(ctx, map[string]any{"addr": addr, "datatype": "string", "datasource": "199"}) + require.Error(t, err) + require.Equal(t, "redis lookup source db should be in range 0-15", err.Error()) +} diff --git a/internal/io/redis/redisPub.go 
b/internal/io/redis/redisPub.go new file mode 100644 index 0000000000..93fde40634 --- /dev/null +++ b/internal/io/redis/redisPub.go @@ -0,0 +1,113 @@ +// Copyright 2023-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "context" + "fmt" + + "github.com/redis/go-redis/v9" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/pkg/cast" + "github.com/lf-edge/ekuiper/v2/pkg/errorx" +) + +type redisPub struct { + conf *redisPubConfig + conn *redis.Client +} + +type redisPubConfig struct { + Address string `json:"address"` + Db int `json:"db"` + Username string `json:"username"` + Password string `json:"password"` + Channel string `json:"channel"` +} + +func (r *redisPub) Validate(props map[string]any) error { + cfg := &redisPubConfig{} + err := cast.MapToStruct(props, cfg) + if err != nil { + return fmt.Errorf("read properties %v fail with error: %v", props, err) + } + if cfg.Db < 0 || cfg.Db > 15 { + return fmt.Errorf("redisPub db should be in range 0-15") + } + if cfg.Channel == "" { + return fmt.Errorf("redisPub sink is missing property channel") + } + r.conf = cfg + return nil +} + +func (r *redisPub) Ping(_ string, props map[string]any) error { + if err := r.Validate(props); err != nil { + return err + } + r.conn = redis.NewClient(&redis.Options{ + Addr: r.conf.Address, + Username: r.conf.Username, + Password: r.conf.Password, + DB: r.conf.Db, + }) + if err := r.conn.Ping(context.Background()).Err(); err != nil { + return fmt.Errorf("Ping Redis failed with error: %v", err) + } + return nil +} + +func (r *redisPub) Provision(ctx api.StreamContext, props map[string]any) error { + return r.Validate(props) +} + +func (r *redisPub) Connect(ctx api.StreamContext) error { + ctx.GetLogger().Infof("redisSub is opening") + r.conn = redis.NewClient(&redis.Options{ + Addr: r.conf.Address, + Username: r.conf.Username, + Password: r.conf.Password, + DB: r.conf.Db, + }) + _, err := r.conn.Ping(ctx).Result() + return err +} + +func (r *redisPub) Collect(ctx api.StreamContext, item api.RawTuple) error { + // Publish + err := r.conn.Publish(ctx, r.conf.Channel, item.Raw()).Err() + if err != nil { + return errorx.NewIOErr(fmt.Sprintf(`Error occurred while publishing the Redis message to %s`, r.conf.Address)) + } + return nil +} + +func (r *redisPub) Close(ctx api.StreamContext) error { + ctx.GetLogger().Infof("Closing redisPub sink") + if r.conn != nil { + err := r.conn.Close() + if err != nil { + return err + } + } + return nil +} + +func RedisPub() api.Sink { + return &redisPub{} +} + +var _ api.BytesCollector = &redisPub{} diff --git a/internal/io/redis/redisPub_test.go b/internal/io/redis/redisPub_test.go new file mode 100644 index 0000000000..cb2333a915 --- /dev/null +++ b/internal/io/redis/redisPub_test.go @@ -0,0 +1,97 @@ +// Copyright 2023-2024 EMQ Technologies Co., Ltd. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/lf-edge/ekuiper/v2/internal/pkg/util" + "github.com/lf-edge/ekuiper/v2/pkg/mock" + mockContext "github.com/lf-edge/ekuiper/v2/pkg/mock/context" +) + +func TestRedisPub(t *testing.T) { + server, _ := mockRedisPubSub(false, true, DefaultChannel) + defer server.Close() + + s := &redisPub{} + input := [][]byte{ + []byte(`{"humidity":50,"status":"green","temperature":22}`), + []byte(`{"humidity":82,"status":"wet","temperature":25}`), + []byte(`{"humidity":60,"status":"hot","temperature":33}`), + } + err := mock.RunBytesSinkCollect(s, input, map[string]any{ + "address": addr, + "db": 0, + "password": "", + "channel": DefaultChannel, + "compression": "", + }) + assert.NoError(t, err) +} + +func TestSinkConfigure(t *testing.T) { + s := RedisPub() + prop := map[string]any{ + "address": "", + "db": "", + "channel": DefaultChannel, + } + expErrStr := fmt.Sprintf("read properties %v fail with error: %v", prop, "1 error(s) decoding:\n\n* 'db' expected type 'int', got unconvertible type 'string', value: ''") + ctx := mockContext.NewMockContext("testSinkConfigure", "op1") + err := s.Provision(ctx, prop) + if err == nil { + t.Errorf("should have error") + return + } else if err.Error() != expErrStr { + t.Errorf("error mismatch:\n\nexp=%v\n\ngot=%v\n\n", expErrStr, err.Error()) + } +} + +func TestSinkPingRedisError(t *testing.T) { + s := RedisPub().(util.PingableConn) + prop := map[string]any{ + "address": "127.0.0.1:6379", + "db": 0, + "channel": DefaultChannel, + } + expErrStr := fmt.Sprintf("Ping Redis failed with error") + err := s.Ping("", prop) + if err == nil { + t.Errorf("should have error") + return + } else { + errorMsg := fmt.Sprintf("%v", err) + parts := strings.SplitN(errorMsg, ":", 2) + if parts[0] != expErrStr { + t.Errorf("error mismatch:\n\nexp=%s\n\ngot=%s\n\n", expErrStr, parts[0]) + } + } +} + +func TestRedisPubDb(t *testing.T) { + props := map[string]any{ + "db": 199, + } + r := &redisPub{} + err := r.Validate(props) + require.Error(t, err) + require.Equal(t, "redisPub db should be in range 0-15", err.Error()) +} diff --git a/internal/io/redis/redisSub.go b/internal/io/redis/redisSub.go new file mode 100644 index 0000000000..7b76d769b7 --- /dev/null +++ b/internal/io/redis/redisSub.go @@ -0,0 +1,117 @@ +// Copyright 2023-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "context" + "fmt" + + "github.com/redis/go-redis/v9" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/pkg/cast" + "github.com/lf-edge/ekuiper/v2/pkg/timex" +) + +type redisSub struct { + conf *redisSubConfig + conn *redis.Client +} + +type redisSubConfig struct { + Address string `json:"address"` + Db int `json:"db"` + Username string `json:"username"` + Password string `json:"password"` + Channels []string `json:"channels"` +} + +func (r *redisSub) Validate(props map[string]any) error { + cfg := &redisSubConfig{} + err := cast.MapToStruct(props, cfg) + if err != nil { + return fmt.Errorf("read properties %v fail with error: %v", props, err) + } + if cfg.Db < 0 || cfg.Db > 15 { + return fmt.Errorf("redisSub db should be in range 0-15") + } + r.conf = cfg + return nil +} + +func (r *redisSub) Ping(dataSource string, props map[string]any) error { + if err := r.Validate(props); err != nil { + return err + } + r.conn = redis.NewClient(&redis.Options{ + Addr: r.conf.Address, + Username: r.conf.Username, + Password: r.conf.Password, + DB: r.conf.Db, + }) + if err := r.conn.Ping(context.Background()).Err(); err != nil { + return fmt.Errorf("Ping Redis failed with error: %v", err) + } + return nil +} + +func (r *redisSub) Provision(ctx api.StreamContext, props map[string]any) error { + return r.Validate(props) +} + +func (r *redisSub) Connect(ctx api.StreamContext) error { + ctx.GetLogger().Infof("redisSub is opening") + r.conn = redis.NewClient(&redis.Options{ + Addr: r.conf.Address, + Username: r.conf.Username, + Password: r.conf.Password, + DB: r.conf.Db, + }) + _, err := r.conn.Ping(ctx).Result() + return err +} + +func (r *redisSub) Subscribe(ctx api.StreamContext, ingest api.BytesIngest, _ api.ErrorIngest) error { + // Subscribe to Redis channels + sub := r.conn.PSubscribe(ctx, r.conf.Channels...) + channel := sub.Channel() + defer sub.Close() + for { + select { + case <-ctx.Done(): + return nil + case msg := <-channel: + rcvTime := timex.GetNow() + ingest(ctx, []byte(msg.Payload), map[string]any{ + "channel": msg.Channel, + }, rcvTime) + } + } +} + +func (r *redisSub) Close(ctx api.StreamContext) error { + ctx.GetLogger().Infof("Closing redisSub source") + if r.conn != nil { + err := r.conn.Close() + if err != nil { + return err + } + } + return nil +} + +func RedisSub() api.Source { + return &redisSub{} +} diff --git a/internal/io/redis/redisSub_test.go b/internal/io/redis/redisSub_test.go new file mode 100644 index 0000000000..5278c33fa9 --- /dev/null +++ b/internal/io/redis/redisSub_test.go @@ -0,0 +1,93 @@ +// Copyright 2023-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package redis + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + _ "go.nanomsg.org/mangos/v3/transport/ipc" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/internal/pkg/util" + "github.com/lf-edge/ekuiper/v2/pkg/mock" + mockContext "github.com/lf-edge/ekuiper/v2/pkg/mock/context" + "github.com/lf-edge/ekuiper/v2/pkg/model" + "github.com/lf-edge/ekuiper/v2/pkg/timex" +) + +func TestSourceConfigure(t *testing.T) { + s := RedisSub() + prop := map[string]any{ + "address": "", + "db": "", + "channels": []string{DefaultChannel}, + } + expErrStr := fmt.Sprintf("read properties %v fail with error: %v", prop, "1 error(s) decoding:\n\n* 'db' expected type 'int', got unconvertible type 'string', value: ''") + ctx := mockContext.NewMockContext("TestSourceConfigure", "op") + err := s.Provision(ctx, prop) + assert.EqualError(t, err, expErrStr) +} + +func TestRedisDB(t *testing.T) { + s := RedisSub() + prop := map[string]any{ + "address": "", + "db": 20, + "channels": []string{DefaultChannel}, + } + ctx := mockContext.NewMockContext("TestRedisDB", "op") + err := s.Provision(ctx, prop) + assert.EqualError(t, err, "redisSub db should be in range 0-15") +} + +func TestSourcePingRedisError(t *testing.T) { + s := RedisSub().(util.PingableConn) + prop := map[string]any{ + "address": "", + "db": 0, + "channels": []string{DefaultChannel}, + } + expErrStr := fmt.Sprintf("Ping Redis failed with error") + err := s.Ping("new", prop) + if err == nil { + t.Errorf("should have error") + return + } else { + errorMsg := fmt.Sprintf("%v", err) + parts := strings.SplitN(errorMsg, ":", 2) + if parts[0] != expErrStr { + t.Errorf("error mismatch:\n\nexp=%s\n\ngot=%s\n\n", expErrStr, parts[0]) + } + } +} + +func TestRun(t *testing.T) { + exp := []api.MessageTuple{ + model.NewDefaultRawTuple([]byte("{\"timestamp\": 1646125996000, \"node_name\": \"node1\", \"group_name\": \"group1\", \"values\": {\"tag_name1\": 11.22, \"tag_name2\": \"yellow\"}, \"errors\": {\"tag_name3\": 122}}"), map[string]any{ + "channel": "TestChannel", + }, timex.GetNow()), + } + s := RedisSub() + mock.TestSourceConnector(t, s, map[string]any{ + "address": addr, + "db": 0, + "channels": []string{DefaultChannel}, + }, exp, func() { + mockRedisPubSub(true, false, DefaultChannel) + }) +} diff --git a/internal/io/redis/redis_test.go b/internal/io/redis/redis_test.go new file mode 100644 index 0000000000..1c22e9a549 --- /dev/null +++ b/internal/io/redis/redis_test.go @@ -0,0 +1,82 @@ +// Copyright 2023-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package redis + +import ( + "fmt" + "time" + + "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" + _ "go.nanomsg.org/mangos/v3/transport/ipc" + + "github.com/lf-edge/ekuiper/v2/internal/topo/context" +) + +const ( + DefaultChannel = "TestChannel" +) + +var data = [][]byte{ + []byte("{\"timestamp\": 1646125996000, \"node_name\": \"node1\", \"group_name\": \"group1\", \"values\": {\"tag_name1\": 11.22, \"tag_name2\": \"yellow\"}, \"errors\": {\"tag_name3\": 122}}"), +} + +var ( + addr string + port string + mr *miniredis.Miniredis +) + +func mockRedisPubSub(pub bool, sub bool, channel string) (*redis.Client, chan []byte) { + var ( + client *redis.Client + subscribe *redis.PubSub + ch chan []byte + ) + ctx := context.Background() + client = redis.NewClient(&redis.Options{ + Addr: addr, + Password: "", + }) + subscribe = client.Subscribe(ctx, channel) + + if sub { + ch = make(chan []byte) + go func() { + for { + message, err := subscribe.ReceiveMessage(ctx) + if err != nil { + return + } + fmt.Printf("Redis RECEIVED: \"%s\"\n", message.Payload) + ch <- []byte(message.Payload) + fmt.Println("Redis Sent out") + } + }() + } + if pub { + go func() { + var msg []byte + for { + for _, msg = range data { + fmt.Printf("Redis Publish: \"%s\"\n", string(msg)) + client.Publish(ctx, channel, msg) + time.Sleep(10 * time.Millisecond) + } + } + }() + } + return client, ch +} diff --git a/internal/io/redis/sink.go b/internal/io/redis/sink.go new file mode 100644 index 0000000000..bff4235209 --- /dev/null +++ b/internal/io/redis/sink.go @@ -0,0 +1,205 @@ +// Copyright 2021-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/redis/go-redis/v9" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/pkg/ast" + "github.com/lf-edge/ekuiper/v2/pkg/cast" +) + +type config struct { + // host:port address. + Addr string `json:"addr,omitempty"` + Username string `json:"username,omitempty"` + // Optional password. Must match the password specified in the + Password string `json:"password,omitempty"` + // Database to be selected after connecting to the server. 
+ Db int `json:"db,omitempty"` + // key of field + Field string `json:"field,omitempty"` + // key define + Key string `json:"key,omitempty"` + KeyType string `json:"keyType,omitempty"` + DataType string `json:"dataType,omitempty"` + Expiration time.Duration `json:"expiration,omitempty"` + RowkindField string `json:"rowkindField"` + DataTemplate string `json:"dataTemplate"` + Fields []string `json:"fields"` + DataField string `json:"dataField"` +} + +type RedisSink struct { + c *config + cli *redis.Client +} + +func (r *RedisSink) Provision(_ api.StreamContext, props map[string]any) error { + return r.Validate(props) +} + +func (r *RedisSink) Connect(ctx api.StreamContext) error { + logger := ctx.GetLogger() + logger.Debug("Opening redis sink") + + r.cli = redis.NewClient(&redis.Options{ + Addr: r.c.Addr, + Username: r.c.Username, + Password: r.c.Password, + DB: r.c.Db, // use default DB + }) + _, err := r.cli.Ping(ctx).Result() + return err +} + +func (r *RedisSink) Validate(props map[string]any) error { + c := &config{DataType: "string", Expiration: -1, KeyType: "single"} + err := cast.MapToStruct(props, c) + if err != nil { + return err + } + if c.Db < 0 || c.Db > 15 { + return fmt.Errorf("redisSink db should be in range 0-15") + } + if c.KeyType == "single" && c.Key == "" && c.Field == "" { + return errors.New("redis sink must have key or field when KeyType is single") + } + if c.KeyType != "single" && c.KeyType != "multiple" { + return errors.New("KeyType only support single or multiple") + } + if c.DataType != "string" && c.DataType != "list" { + return errors.New("redis sink only support string or list data type") + } + r.c = c + return nil +} + +func (r *RedisSink) Collect(ctx api.StreamContext, item api.MessageTuple) error { + return r.save(ctx, item.ToMap()) +} + +func (r *RedisSink) CollectList(ctx api.StreamContext, items api.MessageTupleList) error { + // TODO handle partial error + items.RangeOfTuples(func(_ int, tuple api.MessageTuple) bool { + err := r.save(ctx, tuple.ToMap()) + if err != nil { + ctx.GetLogger().Error(err) + } + return true + }) + return nil +} + +func (r *RedisSink) Close(ctx api.StreamContext) error { + ctx.GetLogger().Infof("Closing redis sink") + err := r.cli.Close() + return err +} + +func (r *RedisSink) save(ctx api.StreamContext, data map[string]any) error { + logger := ctx.GetLogger() + // prepare key value pairs + values := make(map[string]string) + if r.c.KeyType == "multiple" { + for key, val := range data { + v, _ := cast.ToString(val, cast.CONVERT_ALL) + values[key] = v + } + } else { + jsonBytes, err := json.Marshal(data) + if err != nil { + return err + } + val := string(jsonBytes) + key := r.c.Key + if r.c.Field != "" { + keyval, ok := data[r.c.Field] + if !ok { + return fmt.Errorf("field %s does not exist in data %v", r.c.Field, data) + } + key, err = cast.ToString(keyval, cast.CONVERT_ALL) + if err != nil { + return fmt.Errorf("key must be string or convertible to string, but got %v", keyval) + } + } + values[key] = val + } + // get action type + rowkind := ast.RowkindUpsert + if r.c.RowkindField != "" { + c, ok := data[r.c.RowkindField] + if ok { + rowkind, ok = c.(string) + if !ok { + return fmt.Errorf("rowkind field %s is not a string in data %v", r.c.RowkindField, data) + } + if rowkind != ast.RowkindInsert && rowkind != ast.RowkindUpdate && rowkind != ast.RowkindDelete && rowkind != ast.RowkindUpsert { + return fmt.Errorf("invalid rowkind %s", rowkind) + } + } + } + // set key value pairs + for key, val := range values { + var err 
error + switch rowkind { + case ast.RowkindInsert, ast.RowkindUpdate, ast.RowkindUpsert: + if r.c.DataType == "list" { + err = r.cli.LPush(ctx, key, val).Err() + if err != nil { + return fmt.Errorf("lpush %s:%s error, %v", key, val, err) + } + logger.Debugf("push redis list success, key:%s data: %v", key, val) + } else { + err = r.cli.Set(ctx, key, val, r.c.Expiration*time.Second).Err() + if err != nil { + return fmt.Errorf("set %s:%s error, %v", key, val, err) + } + logger.Debugf("set redis string success, key:%s data: %s", key, val) + } + case ast.RowkindDelete: + if r.c.DataType == "list" { + err = r.cli.LPop(ctx, key).Err() + if err != nil { + return fmt.Errorf("lpop %s error, %v", key, err) + } + logger.Debugf("pop redis list success, key:%s data: %v", key, val) + } else { + err = r.cli.Del(ctx, key).Err() + if err != nil { + logger.Error(err) + return err + } + logger.Debugf("delete redis string success, key:%s data: %s", key, val) + } + default: + // never happen + logger.Errorf("unexpected rowkind %s", rowkind) + } + } + return nil +} + +func GetSink() api.Sink { + return &RedisSink{} +} + +var _ api.TupleCollector = &RedisSink{} diff --git a/internal/io/redis/sink_test.go b/internal/io/redis/sink_test.go new file mode 100644 index 0000000000..6710d77956 --- /dev/null +++ b/internal/io/redis/sink_test.go @@ -0,0 +1,449 @@ +// Copyright 2022-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package redis + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/lf-edge/ekuiper/v2/internal/xsql" + "github.com/lf-edge/ekuiper/v2/pkg/cast" + mockContext "github.com/lf-edge/ekuiper/v2/pkg/mock/context" +) + +func TestSink(t *testing.T) { + s := &RedisSink{} + ctx := mockContext.NewMockContext("testSink", "op") + err := s.Provision(ctx, map[string]any{ + "addr": addr, + "key": "test", + }) + if err != nil { + t.Error(err) + return + } + err = s.Connect(ctx) + assert.NoError(t, err) + tests := []struct { + n string + c map[string]any + d any + k string + v any + }{ + { + n: "case1", + c: map[string]any{"key": "1"}, + d: map[string]any{"id": 1, "name": "John", "address": 34, "mobile": "334433"}, + k: "1", + v: `{"address":34,"id":1,"mobile":"334433","name":"John"}`, + }, + { + n: "case2", + c: map[string]any{"field": "id"}, + d: map[string]any{"id": 2, "name": "Susan", "address": 34, "mobile": "334433"}, + k: "2", + v: `{"address":34,"id":2,"mobile":"334433","name":"Susan"}`, + }, + { + n: "case3", + c: map[string]any{"field": "name", "datatype": "list"}, + d: map[string]any{"id": 3, "name": "Susan"}, + k: "Susan", + v: `{"id":3,"name":"Susan"}`, + }, + { + n: "case4", + c: map[string]any{"field": "id", "datatype": "list"}, + d: []map[string]any{ + {"id": 4, "name": "Susan"}, + {"id": 4, "name": "Bob"}, + {"id": 4, "name": "John"}, + }, + k: "4", + v: `{"id":4,"name":"John"}`, + }, + { + n: "case5", + c: map[string]any{"field": "id", "datatype": "string"}, + d: []map[string]any{ + {"id": 25, "name": "Susan"}, + {"id": 25, "name": "Bob"}, + {"id": 25, "name": "John"}, + }, + k: "25", + v: `{"id":25,"name":"John"}`, + }, + } + for _, tt := range tests { + t.Run(tt.n, func(t *testing.T) { + err = cast.MapToStruct(tt.c, s.c) + assert.NoError(t, err) + switch dd := tt.d.(type) { + case map[string]any: + err = s.Collect(ctx, &xsql.Tuple{ + Message: dd, + }) + case []map[string]any: + result := &xsql.WindowTuples{ + Content: make([]xsql.Row, 0, len(dd)), + } + for _, m := range dd { + result.Content = append(result.Content, &xsql.Tuple{ + Message: m, + }) + } + err = s.CollectList(ctx, result) + } + assert.NoError(t, err) + var ( + r string + err error + ) + switch tt.c["datatype"] { + case "list": + r, err = mr.Lpop(tt.k) + default: + r, err = mr.Get(tt.k) + } + assert.NoError(t, err) + assert.Equal(t, tt.v, r) + }) + } +} + +func TestSinkMultipleFields(t *testing.T) { + s := &RedisSink{} + ctx := mockContext.NewMockContext("testSink", "op") + err := s.Provision(ctx, map[string]any{ + "addr": addr, + "key": "test", + }) + assert.NoError(t, err) + err = s.Connect(ctx) + assert.NoError(t, err) + tests := []struct { + n string + c map[string]any + d any + kvPair map[string]any + }{ + { + n: "case1", + c: map[string]any{"keyType": "multiple"}, + d: map[string]any{"id": 1, "name": "John", "address": 34, "mobile": "334433"}, + kvPair: map[string]any{"id": "1", "name": "John", "address": "34", "mobile": "334433"}, + }, + { + n: "case2", + c: map[string]any{"keyType": "multiple", "datatype": "string"}, + d: []map[string]any{ + {"id": 24, "name": "Susan"}, + {"id": 25, "name": "Bob"}, + {"id": 26, "name": "John"}, + }, + kvPair: map[string]any{"id": "26", "name": "John"}, + }, + { + n: "case3", + c: map[string]any{"datatype": "list", "keyType": "multiple"}, + d: map[string]any{ + "listId": 4, "listName": "Susan", + }, + kvPair: map[string]any{"listId": "4", "listName": "Susan"}, + }, + { + n: "case4", + c: map[string]any{"datatype": 
"list", "keyType": "multiple"}, + d: []map[string]any{ + {"listId": 4, "listName": "Susan"}, + {"listId": 5, "listName": "Bob"}, + {"listId": 6, "listName": "John"}, + }, + kvPair: map[string]any{"listId": "6", "listName": "John"}, + }, + } + for _, tt := range tests { + t.Run(tt.n, func(t *testing.T) { + err = cast.MapToStruct(tt.c, s.c) + assert.NoError(t, err) + switch dd := tt.d.(type) { + case map[string]any: + err = s.Collect(ctx, &xsql.Tuple{ + Message: dd, + }) + case []map[string]any: + result := &xsql.WindowTuples{ + Content: make([]xsql.Row, 0, len(dd)), + } + for _, m := range dd { + result.Content = append(result.Content, &xsql.Tuple{ + Message: m, + }) + } + err = s.CollectList(ctx, result) + } + assert.NoError(t, err) + var ( + r string + err error + ) + for k, v := range tt.kvPair { + switch tt.c["datatype"] { + case "list": + r, err = mr.Lpop(k) + default: + r, err = mr.Get(k) + } + assert.NoError(t, err) + assert.Equal(t, v, r) + } + }) + } +} + +func TestUpdateString(t *testing.T) { + s := &RedisSink{} + ctx := mockContext.NewMockContext("testSink", "op") + err := s.Provision(ctx, map[string]any{ + "addr": addr, + "field": "id", + "rowkindField": "action", + }) + assert.NoError(t, err) + err = s.Connect(ctx) + assert.NoError(t, err) + tests := []struct { + n string + d any + k string + v any + }{ + { + n: "case1", + d: map[string]any{ // add without action + "id": "testUpdate1", "name": "Susan", + }, + k: "testUpdate1", + v: `{"id":"testUpdate1","name":"Susan"}`, + }, + { + n: "case2", + d: map[string]any{ // update with action + "action": "update", "id": "testUpdate1", "name": "John", + }, + k: "testUpdate1", + v: `{"action":"update","id":"testUpdate1","name":"John"}`, + }, + { + n: "case3", + d: map[string]any{ // delete + "action": "delete", "id": "testUpdate1", + }, + k: "testUpdate1", + v: ``, + }, + { + n: "case4", + d: []map[string]any{ // multiple actions + {"action": "delete", "id": "testUpdate1"}, + {"action": "insert", "id": "testUpdate1", "name": "Susan"}, + }, + k: "testUpdate1", + v: `{"action":"insert","id":"testUpdate1","name":"Susan"}`, + }, + } + for _, tt := range tests { + switch dd := tt.d.(type) { + case map[string]any: + err = s.Collect(ctx, &xsql.Tuple{ + Message: dd, + }) + case []map[string]any: + result := &xsql.WindowTuples{ + Content: make([]xsql.Row, 0, len(dd)), + } + for _, m := range dd { + result.Content = append(result.Content, &xsql.Tuple{ + Message: m, + }) + } + err = s.CollectList(ctx, result) + } + assert.NoError(t, err) + r, err := mr.Get(tt.k) + if tt.v == "" { + assert.EqualError(t, err, "ERR no such key") + } else { + assert.NoError(t, err) + assert.Equal(t, tt.v, r) + } + } +} + +func TestUpdateList(t *testing.T) { + s := &RedisSink{} + ctx := mockContext.NewMockContext("testSink", "op") + err := s.Provision(ctx, map[string]any{ + "addr": addr, + "field": "id", + "datatype": "list", + "rowkindField": "action", + }) + assert.NoError(t, err) + err = s.Connect(ctx) + assert.NoError(t, err) + tests := []struct { + n string + d any + k string + v []string + }{ + { + n: "case1", + d: map[string]any{ // add without action + "id": "testUpdateList", "name": "Susan", + }, + k: "testUpdateList", + v: []string{`{"id":"testUpdateList","name":"Susan"}`}, + }, + { + n: "case2", + d: map[string]any{ // update with action + "action": "update", "id": "testUpdateList", "name": "John", + }, + k: "testUpdateList", + v: []string{`{"action":"update","id":"testUpdateList","name":"John"}`, `{"id":"testUpdateList","name":"Susan"}`}, + }, + { + n: 
"case3", + d: map[string]any{ // delete + "action": "delete", "id": "testUpdateList", + }, + k: "testUpdateList", + v: []string{`{"id":"testUpdateList","name":"Susan"}`}, + }, + { + n: "case4", + d: []map[string]any{ // multiple actions + {"action": "delete", "id": "testUpdateList"}, + {"action": "insert", "id": "testUpdateList", "name": "Susan"}, + }, + k: "testUpdateList", + v: []string{`{"action":"insert","id":"testUpdateList","name":"Susan"}`}, + }, + { + n: "case5", + d: map[string]any{ // delete + "action": "delete", "id": "testUpdateList", + }, + k: "testUpdateList", + v: nil, + }, + } + for _, tt := range tests { + switch dd := tt.d.(type) { + case map[string]any: + err = s.Collect(ctx, &xsql.Tuple{ + Message: dd, + }) + case []map[string]any: + result := &xsql.WindowTuples{ + Content: make([]xsql.Row, 0, len(dd)), + } + for _, m := range dd { + result.Content = append(result.Content, &xsql.Tuple{ + Message: m, + }) + } + err = s.CollectList(ctx, result) + } + assert.NoError(t, err) + r, err := mr.List(tt.k) + if tt.v == nil { + assert.EqualError(t, err, "ERR no such key") + } else { + assert.NoError(t, err) + assert.Equal(t, tt.v, r) + } + } +} + +func TestRedisSink_Configure(t *testing.T) { + type args struct { + props map[string]any + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "missing key and field and default keyType is single", + args: args{map[string]any{ + "addr": addr, + "datatype": "list", + }}, + wantErr: true, + }, + { + name: "missing key and field and keyType is multiple", + args: args{map[string]any{ + "addr": addr, + "datatype": "list", + "keyType": "multiple", + }}, + wantErr: false, + }, + { + name: "key type do not support", + args: args{map[string]any{ + "addr": addr, + "datatype": "list", + "keyType": "ttt", + }}, + wantErr: true, + }, + { + name: "data type do not support", + args: args{map[string]any{ + "addr": addr, + "datatype": "stream", + "keyType": "multiple", + }}, + wantErr: true, + }, + } + ctx := mockContext.NewMockContext("TestConfigure", "op") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &RedisSink{ + c: nil, + } + if err := r.Provision(ctx, tt.args.props); (err != nil) != tt.wantErr { + t.Errorf("Configure() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestRedisSink(t *testing.T) { + s := &RedisSink{} + err := s.Validate(map[string]any{"db": 199}) + require.Error(t, err) + require.Equal(t, "redisSink db should be in range 0-15", err.Error()) +} diff --git a/test/run_jmeter.sh b/test/run_jmeter.sh index e2d6a444dd..1c2349b54b 100755 --- a/test/run_jmeter.sh +++ b/test/run_jmeter.sh @@ -153,8 +153,8 @@ echo -e "---------------------------------------------\n" /opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t test/lookup_table_memory.jmx -Dfvt="$fvt_dir" -l jmeter_logs/lookup_table_memory.jtl -j jmeter_logs/lookup_table_memory.log echo -e "---------------------------------------------\n" -#/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t test/lookup_table_redis.jmx -Dfvt="$fvt_dir" -l jmeter_logs/lookup_table_redis.jtl -j jmeter_logs/lookup_table_redis.log -#echo -e "---------------------------------------------\n" +/opt/jmeter/bin/jmeter.sh -Jjmeter.save.saveservice.output_format=xml -n -t test/lookup_table_redis.jmx -Dfvt="$fvt_dir" -l jmeter_logs/lookup_table_redis.jtl -j jmeter_logs/lookup_table_redis.log +echo -e "---------------------------------------------\n" # #/opt/jmeter/bin/jmeter.sh 
-Jjmeter.save.saveservice.output_format=xml -n -t test/lookup_table_sql.jmx -Dfvt="$fvt_dir" -l jmeter_logs/lookup_table_sql.jtl -j jmeter_logs/lookup_table_sql.log #echo -e "---------------------------------------------\n" From ebce2e09fc81f8fa973b9e5bd410742ae94defff Mon Sep 17 00:00:00 2001 From: ngjaying Date: Tue, 23 Jul 2024 16:05:13 +0800 Subject: [PATCH 2/3] feat(ext): restore image sink (#3035) Signed-off-by: Jiyong Huang --- extensions/impl/image/ekuiper.jpg | Bin 0 -> 5785 bytes extensions/impl/image/image.go | 229 +++++++++++++++++++++ extensions/impl/image/image_test.go | 304 ++++++++++++++++++++++++++++ extensions/sinks/image/image.go | 24 +++ extensions/sinks/image/image.json | 90 ++++++++ internal/binder/io/ext_full.go | 3 +- 6 files changed, 649 insertions(+), 1 deletion(-) create mode 100644 extensions/impl/image/ekuiper.jpg create mode 100644 extensions/impl/image/image.go create mode 100644 extensions/impl/image/image_test.go create mode 100644 extensions/sinks/image/image.go create mode 100644 extensions/sinks/image/image.json diff --git a/extensions/impl/image/ekuiper.jpg b/extensions/impl/image/ekuiper.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cbb4ac2c4be0338938525aaed8e852a00795bd4d GIT binary patch literal 5785 zcmeHLdpMM9yMGxXv?Y*z>pMCB3dhctldEUeG{NDSyfA{^n!O!3@ zAbiHe%mm=z-~j%9fJ4A30LIC=c|r#lbaL}@b8~TV^YQR(;}zf&5a8$I=iec?Q+S7< zh#)_|u(+_uE>STtF##b7NpVrhouXo*n=j#jLC*NWM6FUe_M@eSY3CoUl=C4E5tpu!y~71Z*Q;*V(C!}B{rm%N1_s>=k~ou#<4z>!e2-0S8Mk1(te4N`Kc`{@ngrcLRc-A%%u;mj@db9>>TN~SRXRo zk7h?{j?!wVeyr?|)#N_BDwW};cuu$xx!cRh^GCHG#`z`<4lt!C@H)~PvqCuu@j!&4 z%LPq+Lk4R|+-_g7hK8Dnx7BVq)_xBq!LqEcMvPv>|CaeV$trEc-bXU%W6Ohxfo`j# zGJcIp_`Acp8|sDec``;EueFl9GJEw{UfgD`KgFWnNd!`06bzRnp)Tqjk4^shwVq-EpI(SAY zh5j4_F&aY4XNZ|>VVOm%V`2z)=7g&j5`Gp|LoG+(0&lp*h{yom_T?c&cSU@FBx z;C{gESE}cTxHfhl&0HYj@$s;G!iHSlhFn*ra|~xr`n(j=J(gF1$wwNCii;wRfj=6` zhW#GFt+m6pPAw?Uxj5o{WmTH>8)I*aMB|;waSL$|pU3hbw1;Qc!W+kffPW!dD$If-4GwFTXRvV zaCgN-^?cHLlK-us$)-){>~>?hz`^6(p{G#lVU1X-E(i?a+5LpP>lc{BruXkQM6e4W zFfC$mfmN#2?jC^Po7uisRb}7Xa5Eivs;v9KHqE;cO3T~AZUpl&%4*VCM8u>e2$XlR zvQX;1rt?gRgUSyC#}_HsX{ME}!xl*zeb*;x>v+tmnwK$huA5UBkT!L*be*6h@%9e;~2(A9gdRCUpO zjyUTXsvH)Gba}sF#Z)g|5{mhmP#Tp(><#cwY|P%fKb4oNU8I(ODhqay$6J!aIOx6R ztnZgl<=*YqQm01JX4j4^2Xlw^ZD@f&>#n6K);kEd1##)s)}hi=##$74`A{ARpd{Ey zn!Sb8s?h}Iqb|rLE>sXOJsuX-KNZ*rXB^7h44LXFc6{(@25JWge51feP^o{2wThT& zwgk4-VkvXz!zv5Js4-(dKOI3j(5~@l<*hyH*0beqhFWY))w7 zt188mYC07viadu!<1N|0bth(0^)rEuhHvCX3I!cCj9^X@t^u*7#@Mxg&47;cOUUMY%qY+Ii*lF>i%Do^TD_ zPSd`dq|&MS!dM5lBq!XU>52DTWwt4CiMXHb_{RN*6Te%UBQk?}_jM>v_BM`uZ_Aa> zS$Z+|(WZn*HfMHe|1R?TvqP@atNkdL26c-cHsljstCc-J3#loIRD4Mo^(_2F_beba z@w9U$q5mx!hC1A)aM_2i*S>Q8mX235N{vuSx`t=$50in21kXB@Tq8z!1_EgEpmcq$ zqlJMKMY>h+>F)LeHQ5nCceHo6yY~eOJG2D4#XAYO{**fA)8rh}@rEd%toK~%kXHE{ z8%Gs_bGZSL(770cfRY|Wl9z9Hv1C?Pu~8s!9S`$3da>)p*RCQLUs+=Kx6>7arAdGt=jY5urM9JR0-1!}K@W zcw5R%bvq*B^mY735(JG8Oc2|%Dn1^JYl^hP8H*gMM|;;QZ_`eP zy%jk~m3OXVl+zv`J)&?%QN#Xm^~WT8k2xkT7_Wyk7BgMlw?dw?2La6lRs{&m2e03( z*+y5TIC@3yRn&PUniA~X!|UaVY&qbUxIZpZ<=t4|wabZBvNLe{b=3#>0sM7#AHHij zg#0=*9;I{h3***M$<3gRAwQQ7QQ_hi{?!Qk|X*eByoN zmT`v()%GO{EuENY1l<)2Wg53R_D~RkiE{)kUt| zH~4i{Y)0i=>yHwh#1vJJkl3h&vwL&8@0(0?8p`JX zkZF**I6~3#@ppB(SCMtt7S+#GLmJ=KsTCTS#6~9BOK>h*3xfzgfa$x4*al+-aT`#u+CR=j^xk0c#!UuB7OK2!8+f2j(o zK~v`Pqct9$7wVm+?e^zO9KKq5`Uxbj30K6QcC#sp6XYw_59e*#!6R;)dC4!o%|VZx z;@_9HKRX8Z=@Vu)-}6-#Zgdna(kyW1!q@0u>|i)E-Cv%qJvDPq>_(l1HsW#O)6?-- zv6&PfR7VWiwb${8_*8n~F&;sm@@Igsx5K6WJrn}%2KK>Gi%Qx%{k7P8MY)- zk3`QJ%`e4lmo4WISd-B3-FHt|s%%Qxw~aGCH|{45({A!o6I+ViKENt3#JV$%g>^r9 
zhu87?woF6cxHUx59SaN=N`9~VRJnDgK(Zi{`l(gN?duR5-olWECO8-ZO@JDG>5pN^ zzUAhkt*I}`-@}QOO)2{3iXG?GmAjN_6Y<@f9^+7eB%E~W##M$HLKTevKeFHEFWfw= zv8=edKeotGa&T0O_)uO^r{MLYHxVB>k!E{n35F=7eFy3_irI;vL1~OQJ3W|;J?vBB zWzc}yAgvo&KLG(xYeo`w9;zVR?K=Wirhb=^(-1d?2@K)OP$Vw0R18r8Veb*kMoc{r z@ItKH9u863IwG*dGy#D`OT;>zw33>cfyy9@VxgL{ie0ycU)P&mF(DnRQGiMv3y5MO z7ZEFdJ*I7RFZLQliG0k?D}P|GtpXvRMf#0GNNFr|@G_8~ztJ0s_jgvAJn=^1l9cPU z^R)|-oI+PPipnamF{ot=Cd9p$Vo9u>s|P7;m%%0^hLOX_q1MDG6d+0xUVy+A5+fZu z-vt7g{$3(g;`apf9N(^w0RdU2DGO2^xkiix+D8yb)nStnw8)TVlvUUuo+=Mjuv6?0 zgeTKEG&Udf3AVx6-$0;ed~-w1*ZoEu3H@v56I-*N>xER3bv>R2IrX_le;sk_(1$Le z1}}yvbQ>WS1k6&W`xsv-rjQPcCC#bcf^=9W$q2Kxyp#(e+y{ZhpoAqt$K-!49~2XY zDCC95*m~7geD;y8VxcJekDj6NuohD2|8z{()(=G?&poH`n=N6Z0y2)-S43Dx*xuKP zU!Fv%90`hL-b=ju_H?t{-A6E7T}{JFG?ho@Y@xM&p^Z-|x~+H7z42H`U&7Y?K942p zG+tM2xFy5PpwMeK`m<9i){=f0rc=sTO#jR)H=&y4t599L}X zDY?0tgjsUmtVsL+PgkV9OEp9g;3Y?wK18){n9@SWb;7LXYpyl#DA(2PR8N=AS<`nq zU@K%J?D)2{W^5&St6N`=bcuQxsPy!n?I!Xb*{%Hx^#|{pxU90`nV(?-g@DN|u}MT} zb3?2uUy$I2Q4IB3MPdDTBiHq!c9`}7G|$3I%kM%q)^PvFNesAR?G>fTbKcz%QeBG3vL0fcK6fJ&y{*D-9F<;ABkAey8gsrl z0s>KoS0C;jYpsl(xccLh$x{!zE7xrAF7R>*pO7V!Mu>t=XpoV z;&Oemao!qc`XzhK^t>!MI(iY-BVQ_u>>hX5ehPf#b^BJiQV8MFpVX{ds@Nn0#Q!qZ zp|6c5pY<13gixmJW*b9-cXkwF$ZNp?VSyn?sF#om^%D5MVZToix}}*5 z^qkH91k~%1fFyBuml<4XgtYNZmZ?P5i&4nVXRSc%0{)6A^|6K7AJWR}hNM;oJP~Cp z_uK0+a+C7qzP@J+KGkKKIl1b`xs~5C7Tp_mjN^VszP^Eq%!g1!>nr<~b@K*tneqE? zseWGB?(j?RaAG24C#Pur6h)oHy&c+u!@21)oc0C%ojUfmC}&GQKdlsrU3xgP=T|!C xtjyK*ab}&Crp7$-7jVNGL2bH@Ha}eagP81 literal 0 HcmV?d00001 diff --git a/extensions/impl/image/image.go b/extensions/impl/image/image.go new file mode 100644 index 0000000000..3437b9b801 --- /dev/null +++ b/extensions/impl/image/image.go @@ -0,0 +1,229 @@ +// Copyright 2021-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package image + +import ( + "bytes" + "context" + "errors" + "fmt" + "image/jpeg" + "image/png" + "os" + "path/filepath" + "strings" + "time" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/pkg/cast" + "github.com/lf-edge/ekuiper/v2/pkg/timex" +) + +type c struct { + Path string `json:"path"` + ImageFormat string `json:"imageFormat"` + MaxAge int `json:"maxAge"` + MaxCount int `json:"maxCount"` +} + +type imageSink struct { + c *c + cancel context.CancelFunc +} + +func (m *imageSink) Provision(_ api.StreamContext, configs map[string]any) error { + conf := &c{ + MaxAge: 72, + MaxCount: 1000, + } + err := cast.MapToStruct(configs, conf) + if err != nil { + return err + } + if conf.Path == "" { + return errors.New("path is required") + } + if conf.ImageFormat != "png" && conf.ImageFormat != "jpeg" { + return fmt.Errorf("invalid image format: %s", conf.ImageFormat) + } + if conf.MaxAge < 0 { + return fmt.Errorf("invalid max age: %d", conf.MaxAge) + } + if conf.MaxCount < 0 { + return fmt.Errorf("invalid max count: %d", conf.MaxCount) + } + m.c = conf + return nil +} + +func (m *imageSink) Connect(ctx api.StreamContext) error { + if _, err := os.Stat(m.c.Path); os.IsNotExist(err) { + if err := os.MkdirAll(m.c.Path, os.ModePerm); nil != err { + return fmt.Errorf("fail to open image sink for %v", err) + } + } + + t := timex.GetTicker(time.Duration(3) * time.Minute) + exeCtx, cancel := ctx.WithCancel() + m.cancel = cancel + go func() { + defer t.Stop() + for { + select { + case <-t.C: + m.delFile(ctx.GetLogger()) + case <-exeCtx.Done(): + ctx.GetLogger().Info("image sink done") + return + } + } + }() + return nil +} + +func (m *imageSink) delFile(logger api.Logger) error { + logger.Debugf("deleting images") + dirEntries, err := os.ReadDir(m.c.Path) + if nil != err || 0 == len(dirEntries) { + logger.Error("read dir fail") + return err + } + + files := make([]os.FileInfo, 0, len(dirEntries)) + for _, entry := range dirEntries { + info, err := entry.Info() + if err != nil { + continue + } + files = append(files, info) + } + + pos := m.c.MaxCount + delTime := time.Now().Add(time.Duration(0-m.c.MaxAge) * time.Hour) + for i := 0; i < len(files); i++ { + for j := i + 1; j < len(files); j++ { + if files[i].ModTime().Before(files[j].ModTime()) { + files[i], files[j] = files[j], files[i] + } + } + if files[i].ModTime().Before(delTime) && i < pos { + pos = i + break + } + } + logger.Debugf("pos is %d, and file len is %d", pos, len(files)) + for i := pos; i < len(files); i++ { + fname := files[i].Name() + logger.Debugf("try to delete %s", fname) + if strings.HasSuffix(fname, m.c.ImageFormat) { + fpath := filepath.Join(m.c.Path, fname) + os.Remove(fpath) + } + } + return nil +} + +func (m *imageSink) getSuffix() string { + now := time.Now() + year, month, day := now.Date() + hour, minute, second := now.Clock() + nsecond := now.Nanosecond() + return fmt.Sprintf(`%d-%d-%d_%d-%d-%d-%d`, year, month, day, hour, minute, second, nsecond) +} + +func (m *imageSink) saveFile(b []byte, fpath string) error { + reader := bytes.NewReader(b) + switch m.c.ImageFormat { + case "png": + img, err := png.Decode(reader) + if err != nil { + return err + } + fp, err := os.Create(fpath) + if nil != err { + return err + } + defer fp.Close() + err = png.Encode(fp, img) + if err != nil { + os.Remove(fpath) + return err + } + case "jpeg": + img, err := jpeg.Decode(reader) + if err != nil { + return err + } + fp, err := os.Create(fpath) + if nil != err { + return err + } + defer fp.Close() + err = 
jpeg.Encode(fp, img, nil) + if err != nil { + os.Remove(fpath) + return err + } + default: + return fmt.Errorf("unsupported format %s", m.c.ImageFormat) + } + return nil +} + +func (m *imageSink) saveFiles(images map[string]interface{}) error { + for k, v := range images { + image, ok := v.([]byte) + if !ok { + return fmt.Errorf("found none bytes data %v for path %s", image, k) + } + suffix := m.getSuffix() + fname := fmt.Sprintf(`%s%s.%s`, k, suffix, m.c.ImageFormat) + fpath := filepath.Join(m.c.Path, fname) + err := m.saveFile(image, fpath) + if err != nil { + return err + } + } + return nil +} + +func (m *imageSink) Collect(ctx api.StreamContext, item api.MessageTuple) error { + return m.saveFiles(item.ToMap()) +} + +func (m *imageSink) CollectList(ctx api.StreamContext, items api.MessageTupleList) error { + // TODO handle partial errors + items.RangeOfTuples(func(_ int, tuple api.MessageTuple) bool { + err := m.saveFiles(tuple.ToMap()) + if err != nil { + ctx.GetLogger().Error(err) + } + return true + }) + return nil +} + +func (m *imageSink) Close(ctx api.StreamContext) error { + if m.cancel != nil { + m.cancel() + } + return m.delFile(ctx.GetLogger()) +} + +func GetSink() api.Sink { + return &imageSink{} +} + +var _ api.TupleCollector = &imageSink{} diff --git a/extensions/impl/image/image_test.go b/extensions/impl/image/image_test.go new file mode 100644 index 0000000000..47d6ff697a --- /dev/null +++ b/extensions/impl/image/image_test.go @@ -0,0 +1,304 @@ +// Copyright 2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package image + +import ( + "io/fs" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/lf-edge/ekuiper/v2/internal/xsql" + mockContext "github.com/lf-edge/ekuiper/v2/pkg/mock/context" + "github.com/lf-edge/ekuiper/v2/pkg/timex" +) + +func TestConfigure(t *testing.T) { + tests := []struct { + name string + props map[string]any + c *c + err string + }{ + { + name: "wrong type", + props: map[string]any{ + "maxAge": "0.11", + }, + err: "1 error(s) decoding:\n\n* 'maxAge' expected type 'int', got unconvertible type 'string', value: '0.11'", + }, + { + name: "missing path", + props: map[string]any{ + "imageFormat": "jpeg", + }, + err: "path is required", + }, + { + name: "wrong format", + props: map[string]any{ + "path": "data", + "imageFormat": "abc", + }, + err: "invalid image format: abc", + }, + { + name: "default age", + props: map[string]any{ + "path": "data", + "imageFormat": "png", + "maxCount": 1, + }, + c: &c{ + Path: "data", + ImageFormat: "png", + MaxCount: 1, + MaxAge: 72, + }, + }, + { + name: "default count", + props: map[string]any{ + "path": "data", + "imageFormat": "png", + "maxAge": 0.11, + }, + c: &c{ + Path: "data", + ImageFormat: "png", + MaxCount: 1000, + MaxAge: 0, + }, + }, + { + name: "wrong max age", + props: map[string]any{ + "path": "data", + "imageFormat": "png", + "maxAge": -1, + }, + err: "invalid max age: -1", + }, + { + name: "wrong max count", + props: map[string]any{ + "path": "data", + "imageFormat": "png", + "maxCount": -1, + }, + err: "invalid max count: -1", + }, + } + s := &imageSink{} + ctx := mockContext.NewMockContext("testConfigure", "op") + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := s.Provision(ctx, test.props) + if test.err == "" { + assert.NoError(t, err) + assert.Equal(t, test.c, s.c) + } else { + assert.EqualError(t, err, test.err) + } + }) + } +} + +func TestSave(t *testing.T) { + tests := []struct { + name string + props map[string]any + image string + err string + }{ + { + name: "normal", + props: map[string]any{ + "path": "data", + "imageFormat": "png", + }, + image: "../../../docs/en_US/wechat.png", + }, + { + name: "wrong format", + props: map[string]any{ + "path": "data", + "imageFormat": "jpeg", + }, + image: "../../../docs/en_US/wechat.png", + err: "invalid JPEG format: missing SOI marker", + }, + { + name: "normal jpeg", + props: map[string]any{ + "path": "data", + "imageFormat": "jpeg", + }, + image: "ekuiper.jpg", + }, + { + name: "wrong png", + props: map[string]any{ + "path": "data", + "imageFormat": "png", + }, + image: "ekuiper.jpg", + err: "png: invalid format: not a PNG file", + }, + } + ctx := mockContext.NewMockContext("testConfigure", "op") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := os.MkdirAll("data", os.ModePerm) + assert.NoError(t, err) + b, err := os.ReadFile(tt.image) + assert.NoError(t, err) + s := &imageSink{} + err = s.Provision(ctx, tt.props) + assert.NoError(t, err) + + err = s.saveFiles(map[string]any{ + "self": b, + }) + if tt.err == "" { + assert.NoError(t, err) + entries, err := os.ReadDir("data") + assert.NoError(t, err) + assert.Len(t, entries, 1) + } else { + assert.EqualError(t, err, tt.err) + entries, err := os.ReadDir("data") + assert.NoError(t, err) + assert.Len(t, entries, 0) + } + _ = os.RemoveAll("data") + }) + } +} + +func TestCollect(t *testing.T) { + const Path = "test" + s := &imageSink{} + ctx := mockContext.NewMockContext("testSink", "op") + err := s.Provision(ctx, 
map[string]any{ + "path": Path, + "imageFormat": "png", + "maxCount": 1, + }) + assert.NoError(t, err) + b, err := os.ReadFile("../../../docs/en_US/wechat.png") + assert.NoError(t, err) + err = s.Connect(ctx) + assert.NoError(t, err) + defer s.Close(ctx) + tests := []struct { + n string + d any + e string + c int + }{ + { + n: "normal", + d: map[string]any{ + "image": b, + }, + c: 1, + }, + { + n: "multiple", + d: map[string]any{ + "image1": b, + "image2": b, + }, + c: 2, + }, + { + n: "wrong format", + d: map[string]any{ + "wrong": "abc", + }, + c: 0, + e: "found none bytes data [] for path wrong", + }, + { + n: "list", + d: []map[string]any{ + { + "image1": b, + "image2": b, + }, + { + "image2": b, + }, + }, + c: 3, + }, + } + for _, test := range tests { + t.Run(test.n, func(t *testing.T) { + switch dd := test.d.(type) { + case map[string]any: + err = s.Collect(ctx, &xsql.Tuple{ + Message: dd, + }) + case []map[string]any: + result := &xsql.WindowTuples{ + Content: make([]xsql.Row, 0, len(dd)), + } + for _, m := range dd { + result.Content = append(result.Content, &xsql.Tuple{ + Message: m, + }) + } + err = s.CollectList(ctx, result) + } + if test.e == "" { + assert.NoError(t, err) + c, err := countFiles(Path) + assert.NoError(t, err) + assert.Equal(t, test.c, c) + } else { + assert.EqualError(t, err, test.e) + } + timex.Add(5 * time.Minute) + // wait for delete files, test max count + time.Sleep(10 * time.Millisecond) + c, _ := countFiles(Path) + if c > 1 { + assert.Fail(t, "should not have more than 1 after delete files") + } + os.RemoveAll(Path) + err = os.Mkdir(Path, os.ModePerm) + assert.NoError(t, err) + }) + } +} + +func countFiles(dir string) (int, error) { + count := 0 + err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + count++ + } + return nil + }) + return count, err +} diff --git a/extensions/sinks/image/image.go b/extensions/sinks/image/image.go new file mode 100644 index 0000000000..719fc4f9d6 --- /dev/null +++ b/extensions/sinks/image/image.go @@ -0,0 +1,24 @@ +// Copyright 2021-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
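The list variant mirrors the single-tuple path: CollectList walks every tuple in the window and saves its fields, and per-tuple failures are only logged rather than returned (see the TODO in the sink), so one bad payload does not abort the rest of the batch. A minimal sketch of feeding a windowed batch, following the TestCollect case above (in-package, with ctx, s and b set up as in that test):

w := &xsql.WindowTuples{Content: make([]xsql.Row, 0, 2)}
w.Content = append(w.Content, &xsql.Tuple{Message: map[string]any{"front": b}})
w.Content = append(w.Content, &xsql.Tuple{Message: map[string]any{"rear": b}})
// Writes front*.png and rear*.png; per-tuple errors are logged, not returned.
_ = s.CollectList(ctx, w)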
+ +package main + +import ( + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/extensions/impl/image" +) + +func Image() api.Sink { + return image.GetSink() +} diff --git a/extensions/sinks/image/image.json b/extensions/sinks/image/image.json new file mode 100644 index 0000000000..661aeba469 --- /dev/null +++ b/extensions/sinks/image/image.json @@ -0,0 +1,90 @@ +{ + "about": { + "trial": true, + "author": { + "name": "EMQ", + "email": "contact@emqx.io", + "company": "EMQ Technologies Co., Ltd", + "website": "https://www.emqx.io" + }, + "helpUrl": { + "en_US": "https://ekuiper.org/docs/en/latest/guide/sinks/plugin/image.html", + "zh_CN": "https://ekuiper.org/docs/zh/latest/guide/sinks/plugin/image.html" + }, + "description": { + "en_US": "This sink is used to save the picture to the specified folder.", + "zh_CN": "本插件用于将图片保存到指定文件夹。" + } + }, + "libs": [ + ], + "properties": [{ + "name": "path", + "default": "", + "optional": false, + "control": "text", + "type": "string", + "hint": { + "en_US": "The name of the folder where the pictures are saved, such as ./tmp. Note: For multiple rules, their paths cannot be repeated, otherwise they will be deleted from each other.", + "zh_CN": "保存图片的文件夹名,例如 ./tmp。注意:多条 rule 路径不能重复,否则会出现彼此删除的现象。" + }, + "label": { + "en_US": "Path of file", + "zh_CN": "文件路径" + } + }, { + "name": "imageFormat", + "default": "jpeg", + "optional": true, + "control": "select", + "values": [ + "jpeg", + "png" + ], + "type": "string", + "hint": { + "en_US": "File format, support jpeg and png.", + "zh_CN": "文件格式,支持 jpeg 和 png。" + }, + "label": { + "en_US": "The format of image", + "zh_CN": "图片格式" + } + },{ + "name": "maxAge", + "default": 72, + "optional": true, + "control": "text", + "type": "int", + "hint": { + "en_US": "Maximum file storage time (hours). The default value is 72, which means that the picture can be stored for up to 3 days.", + "zh_CN": "最长文件存储时间(小时)。默认值为 72,这表示图片最多保存3天。" + }, + "label": { + "en_US": "maxAge", + "zh_CN": "最长保留时间" + } + },{ + "name": "maxCount", + "default": 1000, + "optional": true, + "control": "text", + "type": "int", + "hint": { + "en_US": "The maximum number of stored pictures. The default value is 1000. The earlier pictures will be deleted. 
The relationship with maxAge is OR.", + "zh_CN": "存储图片的最大数量,默认值是 1000,删除时间较早的图片,与 maxAge 是或的关系。" + }, + "label": { + "en_US": "maxCount", + "zh_CN": "最大写入数量" + } + }], + "node": { + "category": "sink", + "icon": "iconPath", + "label": { + "en": "Image", + "zh": "图像" + } + } +} diff --git a/internal/binder/io/ext_full.go b/internal/binder/io/ext_full.go index 15b773eb95..367c956dd5 100644 --- a/internal/binder/io/ext_full.go +++ b/internal/binder/io/ext_full.go @@ -18,6 +18,7 @@ package io import ( "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/extensions/impl/image" sql2 "github.com/lf-edge/ekuiper/v2/extensions/impl/sql" "github.com/lf-edge/ekuiper/v2/extensions/impl/sql/client" "github.com/lf-edge/ekuiper/v2/extensions/impl/video" @@ -29,7 +30,7 @@ func init() { modules.RegisterSource("video", func() api.Source { return video.GetSource() }) //modules.RegisterSource("kafka", func() api.Source { return kafkaSrc.GetSource() }) //modules.RegisterLookupSource("sql", func() api.LookupSource { return sql.GetLookup() }) - //modules.RegisterSink("image", func() api.Sink { return image.GetSink() }) + modules.RegisterSink("image", func() api.Sink { return image.GetSink() }) //modules.RegisterSink("influx", func() api.Sink { return influx.GetSink() }) //modules.RegisterSink("influx2", func() api.Sink { return influx2.GetSink() }) //modules.RegisterSink("kafka", func() api.Sink { return kafka.GetSink() }) From bddc2a1d75610a99d82948ddd70c85b6c31e9b41 Mon Sep 17 00:00:00 2001 From: ngjaying Date: Tue, 23 Jul 2024 16:27:55 +0800 Subject: [PATCH 3/3] feat(ext): restore influx/influx2 sink (#3036) Signed-off-by: Jiyong Huang --- extensions/impl/influx/influx.go | 176 +++++++ extensions/impl/influx/influx_test.go | 429 ++++++++++++++++ extensions/impl/influx2/influx2.go | 236 +++++++++ extensions/impl/influx2/influx2_test.go | 599 ++++++++++++++++++++++ extensions/impl/tspoint/transform.go | 163 ++++++ extensions/impl/tspoint/transform_test.go | 73 +++ extensions/sinks/influx/influx.go | 56 ++ extensions/sinks/influx/influx.json | 237 +++++++++ extensions/sinks/influx2/influx2.go | 58 +++ extensions/sinks/influx2/influx2.json | 267 ++++++++++ go.mod | 5 + go.sum | 13 + internal/binder/io/ext_full.go | 6 +- 13 files changed, 2316 insertions(+), 2 deletions(-) create mode 100644 extensions/impl/influx/influx.go create mode 100644 extensions/impl/influx/influx_test.go create mode 100644 extensions/impl/influx2/influx2.go create mode 100644 extensions/impl/influx2/influx2_test.go create mode 100644 extensions/impl/tspoint/transform.go create mode 100644 extensions/impl/tspoint/transform_test.go create mode 100644 extensions/sinks/influx/influx.go create mode 100644 extensions/sinks/influx/influx.json create mode 100644 extensions/sinks/influx2/influx2.go create mode 100644 extensions/sinks/influx2/influx2.json diff --git a/extensions/impl/influx/influx.go b/extensions/impl/influx/influx.go new file mode 100644 index 0000000000..8bc6420eaf --- /dev/null +++ b/extensions/impl/influx/influx.go @@ -0,0 +1,176 @@ +// Copyright 2021-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package influx + +import ( + "crypto/tls" + "fmt" + "time" + + client "github.com/influxdata/influxdb1-client/v2" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/extensions/impl/tspoint" + "github.com/lf-edge/ekuiper/v2/pkg/cast" + "github.com/lf-edge/ekuiper/v2/pkg/cert" +) + +// c is the configuration for influx2 sink +type c struct { + // connection + Addr string `json:"addr"` + Username string `json:"username"` + Password string `json:"password"` + // http connection + // tls conf in cert.go + // write options + Database string `json:"database"` + Measurement string `json:"measurement"` + tspoint.WriteOptions +} + +type influxSink struct { + conf c + tlsconf *tls.Config + // temp variables + bp client.BatchPoints + cli client.Client +} + +func (m *influxSink) Provision(ctx api.StreamContext, props map[string]any) error { + m.conf = c{ + WriteOptions: tspoint.WriteOptions{ + PrecisionStr: "ms", + }, + } + err := cast.MapToStruct(props, &m.conf) + if err != nil { + return fmt.Errorf("error configuring influx2 sink: %s", err) + } + if len(m.conf.Addr) == 0 { + return fmt.Errorf("addr is required") + } + if len(m.conf.Database) == 0 { + return fmt.Errorf("database is required") + } + if len(m.conf.Measurement) == 0 { + return fmt.Errorf("measurement is required") + } + err = cast.MapToStruct(props, &m.conf.WriteOptions) + if err != nil { + return fmt.Errorf("error configuring influx sink: %s", err) + } + err = m.conf.WriteOptions.Validate() + if err != nil { + return err + } + tlsConf, err := cert.GenTLSConfig(props, "influx-sink") + if err != nil { + return fmt.Errorf("error configuring tls: %s", err) + } + m.tlsconf = tlsConf + return nil +} + +func (m *influxSink) Connect(ctx api.StreamContext) (err error) { + var insecureSkip bool + if m.tlsconf != nil { + insecureSkip = m.tlsconf.InsecureSkipVerify + } + m.cli, err = client.NewHTTPClient(client.HTTPConfig{ + Addr: m.conf.Addr, + Username: m.conf.Username, + Password: m.conf.Password, + InsecureSkipVerify: insecureSkip, + TLSConfig: m.tlsconf, + }) + if err != nil { + return fmt.Errorf("error creating influx client: %s", err) + } + err = m.conf.WriteOptions.ValidateTagTemplates(ctx) + if err != nil { + return err + } + _, _, err = m.cli.Ping(time.Second * 10) + if err != nil { + return fmt.Errorf("error pinging influx server: %s", err) + } + m.bp, err = client.NewBatchPoints(client.BatchPointsConfig{ + Database: m.conf.Database, + Precision: m.conf.PrecisionStr, + }) + return err +} + +func (m *influxSink) Collect(ctx api.StreamContext, item api.MessageTuple) error { + return m.collect(ctx, item.ToMap()) +} + +func (m *influxSink) CollectList(ctx api.StreamContext, items api.MessageTupleList) error { + return m.collect(ctx, items.ToMaps()) +} + +func (m *influxSink) collect(ctx api.StreamContext, data any) error { + logger := ctx.GetLogger() + err := m.transformPoints(ctx, data) + if err != nil { + logger.Error(err) + return err + } + // Write the batch + err = m.cli.Write(m.bp) + if err != nil { + logger.Error(err) + return err + } + logger.Debug("influx insert success") + return 
nil +} + +func (m *influxSink) transformPoints(ctx api.StreamContext, data any) error { + var err error + m.bp, err = client.NewBatchPoints(client.BatchPointsConfig{ + Database: m.conf.Database, + Precision: m.conf.PrecisionStr, + }) + if err != nil { + return err + } + + rawPts, err := tspoint.SinkTransform(ctx, data, &m.conf.WriteOptions) + if err != nil { + ctx.GetLogger().Error(err) + return err + } + for _, rawPt := range rawPts { + pt, err := client.NewPoint(m.conf.Measurement, rawPt.Tags, rawPt.Fields, rawPt.Tt) + if err != nil { + return err + } + m.bp.AddPoint(pt) + } + return nil +} + +func (m *influxSink) Close(ctx api.StreamContext) error { + ctx.GetLogger().Infof("influx sink close") + return m.cli.Close() +} + +func GetSink() api.Sink { + return &influxSink{} +} + +var _ api.TupleCollector = &influxSink{} diff --git a/extensions/impl/influx/influx_test.go b/extensions/impl/influx/influx_test.go new file mode 100644 index 0000000000..c310ce16b4 --- /dev/null +++ b/extensions/impl/influx/influx_test.go @@ -0,0 +1,429 @@ +// Copyright 2021-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package influx + +import ( + "testing" + "time" + + client "github.com/influxdata/influxdb1-client/v2" + "github.com/stretchr/testify/assert" + + "github.com/lf-edge/ekuiper/v2/extensions/impl/tspoint" + mockContext "github.com/lf-edge/ekuiper/v2/pkg/mock/context" + "github.com/lf-edge/ekuiper/v2/pkg/timex" +) + +func TestConfig(t *testing.T) { + tests := []struct { + name string + conf map[string]interface{} + expected c + error string + }{ + { // 0 + name: "insecureSkipVerify", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "username": "name", + "password": "pass", + "measurement": "test", + "database": "db1", + "tags": map[string]interface{}{ + "tag": "value", + }, + "fields": []interface{}{"temperature"}, + "tsFieldName": "ts", + "insecureSkipVerify": true, + }, + expected: c{ + Addr: "http://192.168.0.3:8086", + Username: "name", + Password: "pass", + Database: "db1", + WriteOptions: tspoint.WriteOptions{Tags: map[string]string{"tag": "value"}, TsFieldName: "ts", PrecisionStr: "ms"}, + Measurement: "test", + }, + }, + { // 0 + name: "test1", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "username": "name", + "password": "pass", + "measurement": "test", + "database": "db1", + "tags": map[string]interface{}{ + "tag": "value", + }, + "fields": []interface{}{"temperature"}, + "tsFieldName": "ts", + }, + expected: c{ + Addr: "http://192.168.0.3:8086", + Username: "name", + Password: "pass", + Database: "db1", + WriteOptions: tspoint.WriteOptions{Tags: map[string]string{"tag": "value"}, TsFieldName: "ts", PrecisionStr: "ms"}, + Measurement: "test", + }, + }, + { + name: "unmarshall error", + conf: map[string]interface{}{ + "database": 12, + }, + error: "error configuring influx2 sink: 1 error(s) decoding:\n\n* 'database' expected type 'string', got unconvertible type 'int', value: '12'", + }, + { + name: 
"addr missing error", + conf: map[string]interface{}{}, + error: "addr is required", + }, + { + name: "database missing error", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + }, + error: "database is required", + }, + { + name: "precision invalid error", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "username": "user1", + "password": "pass1", + "database": "bucket_one", + "measurement": "mm", + "precision": "abc", + }, + error: "precision abc is not supported", + }, + { + name: "measurement missing error", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "username": "user1", + "password": "pass1", + "database": "bucket_one", + "precision": "ns", + }, + error: "measurement is required", + }, + { + name: "unmarshall error for tls", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "username": "user1", + "password": "pass1", + "database": "bucket_one", + "precision": "ns", + "measurement": "mm", + "rootCaPath": 12, + }, + error: "error configuring tls: 1 error(s) decoding:\n\n* 'rootCaPath' expected type 'string', got unconvertible type 'int', value: '12'", + }, + } + ctx := mockContext.NewMockContext("testconfig", "op") + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ifsink := &influxSink{} + err := ifsink.Provision(ctx, test.conf) + if test.error == "" { + assert.NoError(t, err) + } else { + assert.Error(t, err) + assert.Equal(t, test.error, err.Error()) + return + } + assert.Equal(t, test.expected, ifsink.conf) + }) + } +} + +func TestCollectPoints(t *testing.T) { + timex.Set(10) + tests := []struct { + name string + conf c + data any + points []tspoint.RawPoint + }{ + { + name: "normal", + conf: c{ + Measurement: "test1", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + PrecisionStr: "ms", + }, + Database: "db1", + }, + data: map[string]any{ + "temperature": 20, + "humidity": 50, + }, + points: []tspoint.RawPoint{ + { + Fields: map[string]any{ + "temperature": 20, + "humidity": 50, + }, + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + Tt: time.UnixMilli(10), + }, + }, + }, + { + name: "normal batch", + conf: c{ + Measurement: "test2", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + PrecisionStr: "s", + }, + }, + data: []map[string]any{ + { + "temperature": 20, + "humidity": 50, + }, + { + "temperature": 30, + "humidity": 60, + }, + }, + points: []tspoint.RawPoint{ + { + Fields: map[string]any{ + "temperature": 20, + "humidity": 50, + }, + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + Tt: time.UnixMilli(10), + }, + { + Fields: map[string]any{ + "temperature": 30, + "humidity": 60, + }, + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + Tt: time.UnixMilli(10), + }, + }, + }, + { + name: "normal batch sendSingle", + conf: c{ + Measurement: "test3", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "{{.humidity}}", + "tag2": "value2", + }, + PrecisionStr: "s", + TsFieldName: "ts", + }, + }, + data: []map[string]any{ + { + "temperature": 20, + "humidity": 50, + "ts": 100, + }, + { + "temperature": 30, + "humidity": 60, + "ts": 110, + }, + }, + points: []tspoint.RawPoint{ + { + Fields: map[string]any{ + "temperature": 20, + "humidity": 50, + "ts": 100, + }, + Tags: map[string]string{ + "tag1": "50", + "tag2": "value2", + }, + Tt: time.Unix(100, 0), + }, + { 
+ Fields: map[string]any{ + "temperature": 30, + "humidity": 60, + "ts": 110, + }, + Tags: map[string]string{ + "tag1": "60", + "tag2": "value2", + }, + Tt: time.Unix(110, 0), + }, + }, + }, + { + name: "batch/sendSingle with dataTemplate", + conf: c{ + Measurement: "test4", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + PrecisionStr: "us", + TsFieldName: "ts", + }, + }, + data: []map[string]any{ + { + "t": 20, + "ts": 100, + }, + { + "t": 30, + "ts": 110, + }, + }, + points: []tspoint.RawPoint{ + { + Fields: map[string]any{ + "t": 20, + "ts": 100, + }, + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + Tt: time.UnixMicro(100), + }, + { + Fields: map[string]any{ + "t": 30, + "ts": 110, + }, + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + Tt: time.UnixMicro(110), + }, + }, + }, + { + name: "single with fields", + conf: c{ + Measurement: "test5", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "{{.humidity}}", + }, + PrecisionStr: "ns", + TsFieldName: "ts", + }, + }, + data: map[string]any{ + "humidity": 50, + "ts": 100, + }, + points: []tspoint.RawPoint{ + { + Fields: map[string]any{ + "humidity": 50, + "ts": 100, + }, + Tags: map[string]string{ + "tag1": "value1", + "tag2": "50", + }, + Tt: time.Unix(0, 100), + }, + }, + }, + { + name: "single with dataTemplate and dataField", + conf: c{ + Measurement: "test5", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "{{.t}}", + "tag2": "{{.h}}", + }, + PrecisionStr: "ns", + }, + }, + data: map[string]any{ + "t": 20, + "h": 50, + }, + points: []tspoint.RawPoint{ + { + Fields: map[string]any{ + "t": 20, + "h": 50, + }, + Tags: map[string]string{ + "tag1": "20", + "tag2": "50", + }, + Tt: time.UnixMilli(10), + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ifsink := &influxSink{ + conf: test.conf, + } + err := ifsink.conf.WriteOptions.Validate() + assert.NoError(t, err) + ctx := mockContext.NewMockContext(test.name, "op") + err = ifsink.conf.ValidateTagTemplates(ctx) + assert.NoError(t, err) + err = ifsink.transformPoints(ctx, test.data) + assert.NoError(t, err) + result, err := client.NewBatchPoints(client.BatchPointsConfig{ + Database: test.conf.Database, + Precision: test.conf.PrecisionStr, + }) + assert.NoError(t, err) + for _, p := range test.points { + pt, err := client.NewPoint(test.conf.Measurement, p.Tags, p.Fields, p.Tt) + assert.NoError(t, err) + result.AddPoint(pt) + } + assert.Equal(t, result, ifsink.bp) + }) + } +} diff --git a/extensions/impl/influx2/influx2.go b/extensions/impl/influx2/influx2.go new file mode 100644 index 0000000000..c1337b37f4 --- /dev/null +++ b/extensions/impl/influx2/influx2.go @@ -0,0 +1,236 @@ +// Copyright 2021-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
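Putting the v1 pieces together: Provision validates the connection and write options, Connect pings the server and prepares an empty batch, and each Collect/CollectList call rebuilds the batch from the incoming data before writing it out. A minimal sketch in the style of the tests above; the address, database and tag template are placeholders, and a reachable InfluxDB 1.x server is assumed.

ctx := mockContext.NewMockContext("influxExample", "op")
s := &influxSink{}
err := s.Provision(ctx, map[string]any{
	"addr":        "http://127.0.0.1:8086",
	"database":    "mydb",
	"measurement": "test",
	"precision":   "ms",
	"tags":        map[string]any{"tag1": "{{.humidity}}"},
})
if err == nil {
	err = s.Connect(ctx) // pings the server before any write
}
if err == nil {
	// One point: measurement "test", tag1 evaluates to "80", both fields kept.
	err = s.Collect(ctx, &xsql.Tuple{Message: map[string]any{"temperature": 30, "humidity": 80}})
}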
+ +package influx2 + +import ( + "crypto/tls" + "fmt" + "strings" + "time" + + client "github.com/influxdata/influxdb-client-go/v2" + "github.com/influxdata/influxdb-client-go/v2/api/write" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/extensions/impl/tspoint" + "github.com/lf-edge/ekuiper/v2/pkg/cast" + "github.com/lf-edge/ekuiper/v2/pkg/cert" + "github.com/lf-edge/ekuiper/v2/pkg/errorx" +) + +// c is the configuration for influx2 sink +type c struct { + // connection + Addr string `json:"addr"` + Token string `json:"token"` + Org string `json:"org"` + Bucket string `json:"bucket"` + PrecisionStr string `json:"precision"` + Precision time.Duration `json:"-"` + // http connection + // tls conf in cert.go + // write options + UseLineProtocol bool `json:"useLineProtocol"` // 0: json, 1: line protocol + Measurement string `json:"measurement"` + tspoint.WriteOptions + BatchSize int `json:"batchSize"` +} + +// influxSink2 is the sink for influx2. +// To ensure exact order, it uses blocking write api to write data to influxdb2. +type influxSink2 struct { + conf c + tlsconf *tls.Config + // save the token privately + cli client.Client +} + +func (m *influxSink2) Provision(ctx api.StreamContext, props map[string]any) error { + m.conf = c{ + PrecisionStr: "ms", + WriteOptions: tspoint.WriteOptions{ + PrecisionStr: "ms", + }, + } + err := cast.MapToStruct(props, &m.conf) + if err != nil { + return fmt.Errorf("error configuring influx2 sink: %s", err) + } + if len(m.conf.Addr) == 0 { + return fmt.Errorf("addr is required") + } + if len(m.conf.Org) == 0 { + return fmt.Errorf("org is required") + } + if len(m.conf.Bucket) == 0 { + return fmt.Errorf("bucket is required") + } + switch m.conf.PrecisionStr { + case "ms": + m.conf.Precision = time.Millisecond + case "s": + m.conf.Precision = time.Second + case "us": + m.conf.Precision = time.Microsecond + case "ns": + m.conf.Precision = time.Nanosecond + default: + return fmt.Errorf("precision %s is not supported", m.conf.PrecisionStr) + } + if len(m.conf.Measurement) == 0 { + return fmt.Errorf("measurement is required") + } + err = cast.MapToStruct(props, &m.conf.WriteOptions) + if err != nil { + return fmt.Errorf("error configuring influx2 sink: %s", err) + } + err = m.conf.WriteOptions.Validate() + if err != nil { + return err + } + tlsConf, err := cert.GenTLSConfig(props, "influx2-sink") + if err != nil { + return fmt.Errorf("error configuring tls: %s", err) + } + m.tlsconf = tlsConf + if m.conf.BatchSize <= 0 { + m.conf.BatchSize = 1 + } + return nil +} + +func (m *influxSink2) Connect(ctx api.StreamContext) error { + options := client.DefaultOptions().SetPrecision(m.conf.Precision).SetBatchSize(uint(m.conf.BatchSize)) + if m.tlsconf != nil { + options = options.SetTLSConfig(m.tlsconf) + } + m.cli = client.NewClientWithOptions(m.conf.Addr, m.conf.Token, options) + // Test connection + _, err := m.cli.Ping(ctx) + return err +} + +func (m *influxSink2) Collect(ctx api.StreamContext, item api.MessageTuple) error { + return m.collect(ctx, item.ToMap()) +} + +func (m *influxSink2) CollectList(ctx api.StreamContext, items api.MessageTupleList) error { + return m.collect(ctx, items.ToMaps()) +} + +func (m *influxSink2) collect(ctx api.StreamContext, data any) error { + logger := ctx.GetLogger() + // Write out with blocking API to keep order. 
Batch is done by sink node side + writeAPI := m.cli.WriteAPIBlocking(m.conf.Org, m.conf.Bucket) + if !m.conf.UseLineProtocol { + pts, err := m.transformPoints(ctx, data) + if err != nil { + return err + } + err = writeAPI.WritePoint(ctx, pts...) + if err != nil { + logger.Errorf("influx2 sink error: %v", err) + return errorx.NewIOErr(fmt.Sprintf(`influx2 sink fails to send out the data . %v`, err)) + } + } else { + lines, err := m.transformLines(ctx, data) + if err != nil { + return err + } + err = writeAPI.WriteRecord(ctx, lines...) + if err != nil { + logger.Errorf("influx2 sink error: %v", err) + return errorx.NewIOErr(fmt.Sprintf(`influx2 sink fails to send out the data . %v`, err.Error())) + } + } + logger.Debug("insert data into influxdb2 success") + return nil +} + +func (m *influxSink2) Close(ctx api.StreamContext) error { + ctx.GetLogger().Infof("influx2 sink close") + m.cli.Close() + return nil +} + +func (m *influxSink2) transformPoints(ctx api.StreamContext, data any) ([]*write.Point, error) { + rawPts, err := tspoint.SinkTransform(ctx, data, &m.conf.WriteOptions) + if err != nil { + ctx.GetLogger().Error(err) + return nil, err + } + pts := make([]*write.Point, 0, len(rawPts)) + for _, rawPt := range rawPts { + pts = append(pts, client.NewPoint(m.conf.Measurement, rawPt.Tags, rawPt.Fields, rawPt.Tt)) + } + return pts, nil +} + +func (m *influxSink2) transformLines(ctx api.StreamContext, data any) ([]string, error) { + rawPts, err := tspoint.SinkTransform(ctx, data, &m.conf.WriteOptions) + if err != nil { + ctx.GetLogger().Error(err) + return nil, err + } + lines := make([]string, 0, len(rawPts)) + for _, rawPt := range rawPts { + lines = append(lines, m.rawPtToLine(rawPt)) + } + return lines, nil +} + +func (m *influxSink2) rawPtToLine(rawPt *tspoint.RawPoint) string { + var builder strings.Builder + builder.WriteString(m.conf.Measurement) + + for k, v := range rawPt.Tags { + builder.WriteString(",") + builder.WriteString(k) + builder.WriteString("=") + builder.WriteString(v) + } + builder.WriteString(" ") + c := 0 + + for k, v := range rawPt.Fields { + c = writeLine(c, &builder, k, v) + } + + builder.WriteString(" ") + builder.WriteString(fmt.Sprintf("%d", rawPt.Ts)) + return builder.String() +} + +func writeLine(c int, builder *strings.Builder, k string, v any) int { + if c > 0 { + builder.WriteString(",") + } + c++ + builder.WriteString(k) + builder.WriteString("=") + switch value := v.(type) { + case string: + builder.WriteString(fmt.Sprintf("\"%s\"", value)) + default: + builder.WriteString(fmt.Sprintf("%v", value)) + } + return c +} + +func GetSink() api.Sink { + return &influxSink2{} +} + +var _ api.TupleCollector = &influxSink2{} diff --git a/extensions/impl/influx2/influx2_test.go b/extensions/impl/influx2/influx2_test.go new file mode 100644 index 0000000000..d059baa581 --- /dev/null +++ b/extensions/impl/influx2/influx2_test.go @@ -0,0 +1,599 @@ +// Copyright 2022-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
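When useLineProtocol is set, points are serialized by rawPtToLine into the classic measurement,tags fields timestamp form, with string field values quoted and the timestamp taken from RawPoint.Ts. A minimal in-package sketch matching the line-protocol cases in the tests below (field order follows Go map iteration, so multi-field lines are not byte-for-byte deterministic):

line := (&influxSink2{conf: c{Measurement: "test1"}}).rawPtToLine(&tspoint.RawPoint{
	Fields: map[string]any{"name": "home"},
	Tags:   map[string]string{"tag1": "value1"},
	Ts:     10,
})
// line == `test1,tag1=value1 name="home" 10`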
+ +package influx2 + +import ( + "testing" + "time" + + client "github.com/influxdata/influxdb-client-go/v2" + "github.com/influxdata/influxdb-client-go/v2/api/write" + "github.com/stretchr/testify/assert" + + "github.com/lf-edge/ekuiper/v2/extensions/impl/tspoint" + mockContext "github.com/lf-edge/ekuiper/v2/pkg/mock/context" + "github.com/lf-edge/ekuiper/v2/pkg/timex" +) + +func TestConfig(t *testing.T) { + tests := []struct { + name string + conf map[string]interface{} + expected c + error string + }{ + { // 0 + name: "test1", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "token": "Token_test", + "measurement": "test", + "org": "admin", + "bucket": "bucket_one", + "tags": map[string]interface{}{ + "tag": "value", + }, + "fields": []interface{}{"temperature"}, + "tsFieldName": "ts", + }, + expected: c{ + Addr: "http://192.168.0.3:8086", + Token: "Token_test", + Org: "admin", + Bucket: "bucket_one", + PrecisionStr: "ms", + Precision: time.Millisecond, + UseLineProtocol: false, + Measurement: "test", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag": "value", + }, + TsFieldName: "ts", + PrecisionStr: "ms", + }, + BatchSize: 1, + }, + }, + { + name: "unmarshall error", + conf: map[string]interface{}{ + "org": 12, + }, + error: "error configuring influx2 sink: 1 error(s) decoding:\n\n* 'org' expected type 'string', got unconvertible type 'int', value: '12'", + }, + { + name: "addr missing error", + conf: map[string]interface{}{}, + error: "addr is required", + }, + { + name: "org missing error", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + }, + error: "org is required", + }, + { + name: "bucket missing error", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "org": "abc", + }, + error: "bucket is required", + }, + { + name: "precision invalid error", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "org": "abc", + "bucket": "bucket_one", + "precision": "abc", + }, + error: "precision abc is not supported", + }, + { + name: "measurement missing error", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "org": "abc", + "bucket": "bucket_one", + "precision": "ns", + }, + error: "measurement is required", + }, + { + name: "unmarshall error for tls", + conf: map[string]interface{}{ + "addr": "http://192.168.0.3:8086", + "org": "abc", + "bucket": "bucket_one", + "precision": "ns", + "measurement": "mm", + "rootCaPath": 12, + }, + error: "error configuring tls: 1 error(s) decoding:\n\n* 'rootCaPath' expected type 'string', got unconvertible type 'int', value: '12'", + }, + } + ctx := mockContext.NewMockContext("testconfig", "op") + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ifsink := &influxSink2{} + err := ifsink.Provision(ctx, test.conf) + if test.error == "" { + assert.NoError(t, err) + } else { + assert.Error(t, err) + assert.Equal(t, test.error, err.Error()) + return + } + assert.Equal(t, test.expected, ifsink.conf) + }) + } +} + +func TestCollectPoints(t *testing.T) { + timex.Set(10) + tests := []struct { + name string + conf c + data any + result []*write.Point + }{ + { + name: "normal", + conf: c{ + Measurement: "test1", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + }, + }, + data: map[string]any{ + "temperature": 20, + "humidity": 50, + }, + result: []*write.Point{ + client.NewPoint("test1", map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, 
map[string]any{ + "temperature": 20, + "humidity": 50, + }, time.UnixMilli(10)), + }, + }, + { + name: "normal batch", + conf: c{ + Measurement: "test2", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + PrecisionStr: "s", + }, + PrecisionStr: "s", + }, + data: []map[string]any{ + { + "temperature": 20, + "humidity": 50, + }, + { + "temperature": 30, + "humidity": 60, + }, + }, + result: []*write.Point{ + client.NewPoint("test2", map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, map[string]any{ + "temperature": 20, + "humidity": 50, + }, time.UnixMilli(10)), + client.NewPoint("test2", map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, map[string]any{ + "temperature": 30, + "humidity": 60, + }, time.UnixMilli(10)), + }, + }, + { + name: "normal batch sendSingle", + conf: c{ + Measurement: "test3", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "{{.humidity}}", + "tag2": "value2", + }, + PrecisionStr: "s", + TsFieldName: "ts", + }, + }, + data: []map[string]any{ + { + "temperature": 20, + "humidity": 50, + "ts": 100, + }, + { + "temperature": 30, + "humidity": 60, + "ts": 110, + }, + }, + result: []*write.Point{ + client.NewPoint("test3", map[string]string{ + "tag1": "50", + "tag2": "value2", + }, map[string]any{ + "temperature": 20, + "humidity": 50, + "ts": 100, + }, time.Unix(100, 0)), + client.NewPoint("test3", map[string]string{ + "tag1": "60", + "tag2": "value2", + }, map[string]any{ + "temperature": 30, + "humidity": 60, + "ts": 110, + }, time.Unix(110, 0)), + }, + }, + { + name: "batch/sendSingle with dataTemplate", + conf: c{ + Measurement: "test4", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + PrecisionStr: "us", + TsFieldName: "ts", + }, + }, + data: []map[string]any{ + { + "t": 20, + "ts": 100, + }, + { + "t": 30, + "ts": 110, + }, + }, + result: []*write.Point{ + client.NewPoint("test4", map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, map[string]any{ + "t": 20, + "ts": 100, + }, time.UnixMicro(100)), + client.NewPoint("test4", map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, map[string]any{ + "t": 30, + "ts": 110, + }, time.UnixMicro(110)), + }, + }, + { + name: "single with fields", + conf: c{ + Measurement: "test5", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "{{.humidity}}", + }, + PrecisionStr: "ns", + TsFieldName: "ts", + }, + }, + data: map[string]any{ + "humidity": 50, + "ts": 100, + }, + result: []*write.Point{ + client.NewPoint("test5", map[string]string{ + "tag1": "value1", + "tag2": "50", + }, map[string]any{ + "humidity": 50, + "ts": 100, + }, time.Unix(0, 100)), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ifsink := &influxSink2{ + conf: test.conf, + } + ctx := mockContext.NewMockContext(test.name, "op") + points, err := ifsink.transformPoints(ctx, test.data) + assert.NoError(t, err) + assert.Equal(t, test.result, points) + }) + } +} + +func TestCollectPointsError(t *testing.T) { + tests := []struct { + name string + conf c + data any + err string + }{ + { + name: "unsupported data", + conf: c{ + Measurement: "test1", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + }, + }, + data: []byte{1, 2, 3}, + err: "sink needs map or []map, but receive unsupported data [1 2 3]", + }, + { + name: "single without 
ts field", + conf: c{ + Measurement: "test1", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + TsFieldName: "ts", + }, + }, + data: map[string]any{ + "temperature": 20, + "humidity": 50, + }, + err: "time field ts not found", + }, + { + name: "normal batch with incorrect ts field", + conf: c{ + Measurement: "test2", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + PrecisionStr: "s", + TsFieldName: "ts", + }, + }, + data: []map[string]any{ + { + "temperature": 20, + "humidity": 50, + "ts": "add", + }, + { + "temperature": 30, + "humidity": 60, + "ts": "ddd", + }, + }, + err: "time field ts can not convert to timestamp(int64) : add", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ifsink := &influxSink2{ + conf: test.conf, + } + ctx := mockContext.NewMockContext(test.name, "op") + _, err := ifsink.transformPoints(ctx, test.data) + assert.Error(t, err) + assert.Equal(t, test.err, err.Error()) + }) + } +} + +// Do not test for multiple tags and data to avoid order problem +func TestCollectLines(t *testing.T) { + timex.Set(10) + tests := []struct { + name string + conf c + data any + result []string + }{ + { + name: "normal", + conf: c{ + Measurement: "test1", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + }, + }, + UseLineProtocol: true, + }, + data: map[string]any{ + "name": "home", + }, + result: []string{"test1,tag1=value1 name=\"home\" 10"}, + }, + { + name: "normal batch", + conf: c{ + Measurement: "test2", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag2": "value2", + }, + PrecisionStr: "ns", + }, + }, + data: []map[string]any{ + { + "temperature": 20, + }, + { + "humidity": 60, + }, + }, + result: []string{"test2,tag2=value2 temperature=20 10000000", "test2,tag2=value2 humidity=60 10000000"}, + }, + { + name: "normal batch sendSingle", + conf: c{ + Measurement: "test3", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + }, + PrecisionStr: "us", + }, + }, + data: []map[string]any{ + { + "humidity": 50, + }, + { + "temperature": 30, + }, + }, + result: []string{"test3,tag1=value1 humidity=50 10000", "test3,tag1=value1 temperature=30 10000"}, + }, + { + name: "single with fields", + conf: c{ + Measurement: "test5", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag2": "{{.humidity}}", + }, + PrecisionStr: "s", + TsFieldName: "ts", + }, + }, + data: map[string]any{ + "humidity": 50, + "ts": 100, + }, + result: []string{"test5,tag2=50 humidity=50,ts=100 100"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ifsink := &influxSink2{ + conf: test.conf, + } + ctx := mockContext.NewMockContext(test.name, "op") + lines, err := ifsink.transformLines(ctx, test.data) + assert.NoError(t, err) + assert.Equal(t, test.result, lines) + }) + } +} + +func TestCollectLinesError(t *testing.T) { + tests := []struct { + name string + conf c + data any + err string + }{ + { + name: "unsupported data", + conf: c{ + Measurement: "test1", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "value2", + }, + }, + }, + data: []byte{1, 2, 3}, + err: "sink needs map or []map, but receive unsupported data [1 2 3]", + }, + { + name: "single wrong ts format", + conf: c{ + Measurement: "test1", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag1": 
"value1", + }, + TsFieldName: "name", + }, + UseLineProtocol: true, + }, + data: map[string]any{ + "name": "home", + }, + err: "time field name can not convert to timestamp(int64) : home", + }, + { + name: "batch wront ts field", + conf: c{ + Measurement: "test2", + WriteOptions: tspoint.WriteOptions{ + Tags: map[string]string{ + "tag2": "value2", + }, + PrecisionStr: "ns", + TsFieldName: "ts", + }, + }, + data: []map[string]any{ + { + "temperature": 20, + }, + { + "humidity": 60, + }, + }, + err: "time field ts not found", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ifsink := &influxSink2{ + conf: test.conf, + } + ctx := mockContext.NewMockContext(test.name, "op") + _, err := ifsink.transformLines(ctx, test.data) + assert.Error(t, err) + assert.Equal(t, test.err, err.Error()) + }) + } +} diff --git a/extensions/impl/tspoint/transform.go b/extensions/impl/tspoint/transform.go new file mode 100644 index 0000000000..e2943ee5f2 --- /dev/null +++ b/extensions/impl/tspoint/transform.go @@ -0,0 +1,163 @@ +// Copyright 2023-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tspoint + +import ( + "fmt" + "strings" + "time" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/pkg/cast" + "github.com/lf-edge/ekuiper/v2/pkg/timex" +) + +type WriteOptions struct { + PrecisionStr string `json:"precision"` + + Tags map[string]string `json:"tags"` + TsFieldName string `json:"tsFieldName"` +} + +func (o *WriteOptions) Validate() error { + switch o.PrecisionStr { + case "ms", "s", "us", "ns": + // no error + default: + return fmt.Errorf("precision %s is not supported", o.PrecisionStr) + } + return nil +} + +func (o *WriteOptions) ValidateTagTemplates(ctx api.StreamContext) error { + for _, v := range o.Tags { + _, err := ctx.ParseTemplate(v, nil) + if err != nil && strings.HasPrefix(err.Error(), "Template Invalid") { + return err + } + } + return nil +} + +type RawPoint struct { + Fields map[string]any + Tags map[string]string + Tt time.Time + Ts int64 +} + +func SinkTransform(ctx api.StreamContext, data any, options *WriteOptions) ([]*RawPoint, error) { + var pts []*RawPoint + switch dd := data.(type) { + case map[string]any: + pt, err := singleMapToPoint(ctx, dd, options) + if err != nil { + return nil, err + } + pts = append(pts, pt) + case []map[string]any: + pts = make([]*RawPoint, 0, len(dd)) + // TODO possible problem here that the ts filed is transformed out + for _, d := range dd { + tt, ts, err := getTime(d, options.TsFieldName, options.PrecisionStr) + if err != nil { + return nil, err + } + pt, err := mapToPoint(ctx, d, options, tt, ts) + if err != nil { + return nil, err + } + pts = append(pts, pt) + } + default: + return nil, fmt.Errorf("sink needs map or []map, but receive unsupported data %v", dd) + } + return pts, nil +} + +// Method to convert map to influxdb point, including the sink transforms + map to point +func singleMapToPoint(ctx api.StreamContext, dd map[string]any, 
options *WriteOptions) (*RawPoint, error) { + tt, ts, err := getTime(dd, options.TsFieldName, options.PrecisionStr) + if err != nil { + return nil, err + } + return mapToPoint(ctx, dd, options, tt, ts) +} + +// Internal method to transform map to influxdb point +func mapToPoint(ctx api.StreamContext, mm map[string]any, options *WriteOptions, tt time.Time, ts int64) (*RawPoint, error) { + tagEval := make(map[string]string, len(options.Tags)) + for k, v := range options.Tags { + vv, err := ctx.ParseTemplate(v, mm) + if err != nil { + return nil, fmt.Errorf("parse %s tag template %s failed, err:%v", k, v, err) + } + // convertAll has no error + vs, _ := cast.ToString(vv, cast.CONVERT_ALL) + tagEval[k] = vs + } + return &RawPoint{ + Fields: mm, + Tags: tagEval, + Tt: tt, + Ts: ts, + }, nil +} + +// Internal method to get time from map with tsFieldName +func getTime(data map[string]any, tsFieldName string, precisionStr string) (time.Time, int64, error) { + if tsFieldName != "" { + v64, err := getTS(data, tsFieldName) + if err != nil { + return time.Time{}, v64, err + } + switch precisionStr { + case "ms": + return time.UnixMilli(v64), v64, nil + case "s": + return time.Unix(v64, 0), v64, nil + case "us": + return time.UnixMicro(v64), v64, nil + case "ns": + return time.Unix(0, v64), v64, nil + } + return time.UnixMilli(v64), v64, nil + } else { + tt := timex.GetNow() + switch precisionStr { + case "ms": + return tt, tt.UnixMilli(), nil + case "s": + return tt, tt.Unix(), nil + case "us": + return tt, tt.UnixMicro(), nil + case "ns": + return tt, tt.UnixNano(), nil + } + return tt, tt.UnixMilli(), nil + } +} + +func getTS(data map[string]any, tsFieldName string) (int64, error) { + v, ok := data[tsFieldName] + if !ok { + return 0, fmt.Errorf("time field %s not found", tsFieldName) + } + v64, err := cast.ToInt64(v, cast.CONVERT_SAMEKIND) + if err != nil { + return 0, fmt.Errorf("time field %s can not convert to timestamp(int64) : %v", tsFieldName, v) + } + return v64, nil +} diff --git a/extensions/impl/tspoint/transform_test.go b/extensions/impl/tspoint/transform_test.go new file mode 100644 index 0000000000..fcf056ec89 --- /dev/null +++ b/extensions/impl/tspoint/transform_test.go @@ -0,0 +1,73 @@ +// Copyright 2023-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
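The timestamp handling above is the part most worth spelling out: with tsFieldName set, the named field is read as an int64 and interpreted according to precision; without it, the clock from timex.GetNow() is used. A minimal in-package sketch, assuming a mock context as in the tests below:

ctx := mockContext.NewMockContext("tspointExample", "op")
opts := &WriteOptions{
	PrecisionStr: "s",
	TsFieldName:  "ts",
	Tags:         map[string]string{"tag1": "{{.humidity}}"},
}
pts, err := SinkTransform(ctx, map[string]any{"humidity": 50, "ts": 100}, opts)
// err == nil; pts[0].Tt == time.Unix(100, 0); pts[0].Tags["tag1"] == "50";
// pts[0].Fields keeps the full message, including the ts field.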
+ +package tspoint + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + mockContext "github.com/lf-edge/ekuiper/v2/pkg/mock/context" +) + +func Test_parseTemplates(t *testing.T) { + tests := []struct { + name string + conf WriteOptions + err string + }{ + { + name: "normal", + conf: WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + }, + }, + }, + { + name: "normal with template", + conf: WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "{{.temperature}}", + "tag3": "100", + }, + }, + }, + { + name: "error template", + conf: WriteOptions{ + Tags: map[string]string{ + "tag1": "value1", + "tag2": "{{abc .temperature}}", + "tag3": "100", + }, + }, + err: "Template Invalid: template: sink:1: function \"abc\" not defined", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := mockContext.NewMockContext("parseTemplate", "op") + err := tt.conf.ValidateTagTemplates(ctx) + if tt.err == "" { + assert.NoError(t, err) + } else { + assert.Error(t, err) + assert.Equal(t, tt.err, err.Error()) + } + }) + } +} diff --git a/extensions/sinks/influx/influx.go b/extensions/sinks/influx/influx.go new file mode 100644 index 0000000000..06f8a4c370 --- /dev/null +++ b/extensions/sinks/influx/influx.go @@ -0,0 +1,56 @@ +// Copyright 2021-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
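ValidateTagTemplates is a parse-time guard: a tag value that is not a well-formed Go template is rejected up front, while well-formed templates are evaluated per message later in mapToPoint. A minimal sketch, reusing the mock context pattern of the test above:

opts := tspoint.WriteOptions{Tags: map[string]string{"bad": "{{abc .temperature}}"}}
err := opts.ValidateTagTemplates(mockContext.NewMockContext("check", "op"))
// err.Error() == `Template Invalid: template: sink:1: function "abc" not defined`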
+ +package main + +import ( + _ "github.com/influxdata/influxdb1-client/v2" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/extensions/impl/influx" +) + +func Influx() api.Sink { + return influx.GetSink() +} + +//// This is for manual test +//func main() { +// i := Influx() +// err := i.Configure(map[string]interface{}{ +// "addr": "http://127.0.0.1:8086", +// "measurement": "test", +// "database": "mydb", +// "tags": map[string]interface{}{ +// "tag": "{{.humidity}}", +// }, +// }) +// if err != nil { +// panic(err) +// } +// contextLogger := conf.Log.WithField("rule", "ruleInflux") +// ctx := context.WithValue(context.Background(), context.LoggerKey, contextLogger) +// err = i.Open(ctx) +// if err != nil { +// panic(err) +// } +// err = i.Collect(ctx, map[string]interface{}{"temperature": 30, "humidity": 80}) +// if err != nil { +// panic(err) +// } +// err = i.Close(ctx) +// if err != nil { +// panic(err) +// } +//} diff --git a/extensions/sinks/influx/influx.json b/extensions/sinks/influx/influx.json new file mode 100644 index 0000000000..f7c0379ca1 --- /dev/null +++ b/extensions/sinks/influx/influx.json @@ -0,0 +1,237 @@ +{ + "about": { + "trial": true, + "author": { + "name": "Yongxing Ma", + "email": "", + "company": "", + "website": "" + }, + "helpUrl": { + "en_US": "https://ekuiper.org/docs/en/latest/guide/sinks/plugin/influx.html", + "zh_CN": "https://ekuiper.org/docs/zh/latest/guide/sinks/plugin/influx.html" + }, + "description": { + "en_US": "This a sink plugin for InfluxDB, it can be used for saving the analysis data into InfluxDB.", + "zh_CN": "本插件为 InfluxDB 的持久化插件,可以用于将分析数据存入 InfluxDB 中" + } + }, + "libs": [ + "github.com/influxdata/influxdb1-client/v2@master" + ], + "properties": [ + { + "name": "addr", + "default": "http://127.0.0.1:8086", + "optional": false, + "control": "text", + "type": "string", + "hint": { + "en_US": "The addr of the InfluxDB", + "zh_CN": "InfluxDB 的地址" + }, + "label": { + "en_US": "Addr", + "zh_CN": "地址" + } + }, + { + "name": "database", + "default": "", + "optional": false, + "control": "text", + "type": "string", + "hint": { + "en_US": "The database of the InfluxDB", + "zh_CN": "InfluxDB 的数据库名" + }, + "label": { + "en_US": "Database name", + "zh_CN": "数据库名" + } + }, + { + "name": "username", + "default": "", + "optional": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "The InfluxDB login username", + "zh_CN": "InfluxDB 登陆用户名" + }, + "label": { + "en_US": "Username", + "zh_CN": "用户名" + } + }, + { + "name": "password", + "default": "", + "optional": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "The InfluxDB login password", + "zh_CN": "InfluxDB 登陆密码" + }, + "label": { + "en_US": "Password", + "zh_CN": "密码" + } + }, + { + "name": "certificationPath", + "default": "", + "optional": true, + "connection_related": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "The location of certification path. It can be an absolute path, or a relative path.", + "zh_CN": "证书路径。可以为绝对路径,也可以为相对路径。如果指定的是相对路径,那么父目录为执行 server 命令的路径。" + }, + "label": { + "en_US": "Certification path", + "zh_CN": "证书路径" + } + }, + { + "name": "privateKeyPath", + "default": "", + "optional": true, + "connection_related": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "The location of private key path. It can be an absolute path, or a relative path. 
", + "zh_CN": "私钥路径。可以为绝对路径,也可以为相对路径。" + }, + "label": { + "en_US": "Private key path", + "zh_CN": "私钥路径" + } + }, + { + "name": "rootCaPath", + "default": "", + "optional": true, + "connection_related": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "The location of root ca path. It can be an absolute path, or a relative path. ", + "zh_CN": "根证书路径,用以验证服务器证书。可以为绝对路径,也可以为相对路径。" + }, + "label": { + "en_US": "Root CA path", + "zh_CN": "根证书路径" + } + }, + { + "name": "insecureSkipVerify", + "default": false, + "optional": true, + "connection_related": true, + "control": "radio", + "type": "bool", + "hint": { + "en_US": "Control if to skip the certification verification. If it is set to true, then skip certification verification; Otherwise, verify the certification.", + "zh_CN": "控制是否跳过证书认证。如果被设置为 true,那么跳过证书认证;否则进行证书验证。" + }, + "label": { + "en_US": "Skip Certification verification", + "zh_CN": "跳过证书验证" + } + }, + { + "name": "precision", + "default": "ms", + "optional": false, + "control": "select", + "type": "string", + "values": [ + "s", + "ms", + "us", + "ns" + ], + "hint": { + "en_US": "The time precision, can be set to ns, us, ms, s. Default is ms.", + "zh_CN": "时间精度,可设置为 ns, us, ms, s。默认为 ms。" + }, + "label": { + "en_US": "Precision", + "zh_CN": "时间精度" + } + }, + { + "name": "measurement", + "default": "", + "optional": false, + "control": "text", + "type": "string", + "hint": { + "en_US": "The measurement of the InfluxDb", + "zh_CN": "InfluxDb 的 Measurement" + }, + "label": { + "en_US": "Measurement", + "zh_CN": "Measurement" + } + }, + { + "name": "tsFieldName", + "default": "", + "optional": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "If set, the written timestamp will use the value of the field. For example, if the data has {\"ts\": 1888888888} and the tsFieldName is set to ts, then the value 1888888888 will be used when written to InfluxDB. Make sure the value is formatted according to the precision. If not set, the current timestamp will be used.", + "zh_CN": "若有设置,写入时的时间戳以该字段的值为准。例如,假设数据为 {\"ts\": 1888888888} 且 tsFieldName 属性设置为 ts,则 1888888888 将作为此条数据写入作为的时间戳。此时,需要确保时间戳的值的精度与 precision 的配置相同。 如果该属性未设置,则写入时采用当时的时间戳。" + }, + "label": { + "en_US": "Timestamp Field Name", + "zh_CN": "时间戳字段名" + } + }, + { + "name": "fields", + "default": [], + "optional": true, + "control": "list", + "type": "list_string", + "hint": { + "en_US": "Fields to be sent", + "zh_CN": "返回的数据字段。" + }, + "label": { + "en_US": "Fields", + "zh_CN": "Fields" + } + }, + { + "name": "tags", + "default": [], + "optional": true, + "control": "list", + "type": "object", + "hint": { + "en_US": "The tags to write, the format is like {\"tag1\":\"value1\"}. The value can be dataTemplate format, like {\"tag1\":\"{{.temperature}}\"}", + "zh_CN": "标签键值对,其格式为 {\"tag1\":\"value1\"}。其中,值可为数据模板格式,例如 {\"tag1\":\"{{.temperature}}\"}" + }, + "label": { + "en_US": "Tags", + "zh_CN": "标签" + } + } + ], + "node": { + "category": "sink", + "icon": "iconPath", + "label": { + "en": "InfluxDB 1", + "zh": "InfluxDB 1" + } + } +} diff --git a/extensions/sinks/influx2/influx2.go b/extensions/sinks/influx2/influx2.go new file mode 100644 index 0000000000..f704876f84 --- /dev/null +++ b/extensions/sinks/influx2/influx2.go @@ -0,0 +1,58 @@ +// Copyright 2021-2024 EMQ Technologies Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + _ "github.com/influxdata/influxdb-client-go/v2" + + "github.com/lf-edge/ekuiper/contract/v2/api" + "github.com/lf-edge/ekuiper/v2/extensions/impl/influx2" +) + +func Influx2() api.Sink { + return influx2.GetSink() +} + +//// This is for manual test +//func main() { +// i := Influx2() +// err := i.Configure(map[string]interface{}{ +// "addr": "http://127.0.0.1:8086", +// "token": "q1w2e3r4", +// "measurement": "m1", +// "org": "test", +// "bucket": "test", +// "tags": map[string]interface{}{ +// "tag": "value", +// }, +// }) +// if err != nil { +// panic(err) +// } +// contextLogger := conf.Log.WithField("rule", "rule2") +// ctx := context.WithValue(context.Background(), context.LoggerKey, contextLogger) +// err = i.Open(ctx) +// if err != nil { +// panic(err) +// } +// err = i.Collect(ctx, map[string]interface{}{"temperature": 30}) +// if err != nil { +// panic(err) +// } +// err = i.Close(ctx) +// if err != nil { +// panic(err) +// } +//} diff --git a/extensions/sinks/influx2/influx2.json b/extensions/sinks/influx2/influx2.json new file mode 100644 index 0000000000..cd6d24e6e3 --- /dev/null +++ b/extensions/sinks/influx2/influx2.json @@ -0,0 +1,267 @@ +{ + "about": { + "trial": true, + "author": { + "name": "elpsyr", + "email": "hellccqcq@gmail.com", + "company": "", + "website": "https://github.com/elpsyr" + }, + "helpUrl": { + "en_US": "https://ekuiper.org/docs/en/latest/guide/sinks/plugin/influx2.html", + "zh_CN": "https://ekuiper.org/docs/zh/latest/guide/sinks/plugin/influx2.html" + }, + "description": { + "en_US": "This a sink plugin for InfluxDB2, it can be used for saving the analysis data into InfluxDB V2.X .", + "zh_CN": "本插件为 InfluxDB2 的持久化插件,可以用于将分析数据存入 InfluxDB V2.X 中" + } + }, + "libs": [ + "github.com/influxdata/influxdb-client-go/v2@master" + ], + "properties": [ + { + "name": "addr", + "default": "http://127.0.0.1:8086", + "optional": false, + "control": "text", + "type": "string", + "hint": { + "en_US": "The addr of the InfluxDB", + "zh_CN": "InfluxDB 的地址" + }, + "label": { + "en_US": "Addr", + "zh_CN": "地址" + } + }, + { + "name": "bucket", + "default": "", + "optional": false, + "control": "text", + "type": "string", + "hint": { + "en_US": "The InfluxDB bucket", + "zh_CN": "InfluxDB bucket" + }, + "label": { + "en_US": "Bucket", + "zh_CN": "Bucket" + } + }, + { + "name": "token", + "default": "", + "optional": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "InfluxDB token", + "zh_CN": "InfluxDB token" + }, + "label": { + "en_US": "Token", + "zh_CN": "Token" + } + }, + { + "name": "org", + "default": "", + "optional": false, + "control": "text", + "type": "string", + "hint": { + "en_US": "InfluxDB org", + "zh_CN": "InfluxDB org" + }, + "label": { + "en_US": "org", + "zh_CN": "组织" + } + }, + { + "name": "certificationPath", + "default": "", + "optional": true, + "connection_related": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "The location of certification path. 
It can be an absolute path, or a relative path.", + "zh_CN": "证书路径。可以为绝对路径,也可以为相对路径。如果指定的是相对路径,那么父目录为执行 server 命令的路径。" + }, + "label": { + "en_US": "Certification path", + "zh_CN": "证书路径" + } + }, + { + "name": "privateKeyPath", + "default": "", + "optional": true, + "connection_related": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "The location of private key path. It can be an absolute path, or a relative path. ", + "zh_CN": "私钥路径。可以为绝对路径,也可以为相对路径。" + }, + "label": { + "en_US": "Private key path", + "zh_CN": "私钥路径" + } + }, + { + "name": "rootCaPath", + "default": "", + "optional": true, + "connection_related": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "The location of root ca path. It can be an absolute path, or a relative path. ", + "zh_CN": "根证书路径,用以验证服务器证书。可以为绝对路径,也可以为相对路径。" + }, + "label": { + "en_US": "Root CA path", + "zh_CN": "根证书路径" + } + }, + { + "name": "insecureSkipVerify", + "default": false, + "optional": true, + "connection_related": true, + "control": "radio", + "type": "bool", + "hint": { + "en_US": "Control if to skip the certification verification. If it is set to true, then skip certification verification; Otherwise, verify the certification.", + "zh_CN": "控制是否跳过证书认证。如果被设置为 true,那么跳过证书认证;否则进行证书验证。" + }, + "label": { + "en_US": "Skip Certification verification", + "zh_CN": "跳过证书验证" + } + }, + { + "name": "precision", + "default": "ms", + "optional": false, + "control": "select", + "type": "string", + "values": [ + "s", + "ms", + "us", + "ns" + ], + "hint": { + "en_US": "The time precision, can be set to ns, us, ms, s. Default is ms.", + "zh_CN": "时间精度,可设置为 ns, us, ms, s。默认为 ms。" + }, + "label": { + "en_US": "Precision", + "zh_CN": "时间精度" + } + }, + { + "name": "useLineProtocol", + "default": false, + "optional": true, + "control": "radio", + "type": "bool", + "hint": { + "en_US": "If line protocol is set, the dataTemplate must format to the line protocol format.", + "zh_CN": "若使用行协议写入,设置数据模板属性时,其格式化结果应当按照行协议格式进行格式化。" + }, + "label": { + "en_US": "Use Line Protocol", + "zh_CN": "使用行协议" + } + }, + { + "name": "measurement", + "default": "", + "optional": false, + "control": "text", + "type": "string", + "hint": { + "en_US": "The measurement of the InfluxDB", + "zh_CN": "InfluxDB 的 measurement" + }, + "label": { + "en_US": "Measurement", + "zh_CN": "Measurement" + } + }, + { + "name": "tsFieldName", + "default": "", + "optional": true, + "control": "text", + "type": "string", + "hint": { + "en_US": "If set, the written timestamp will use the value of the field. For example, if the data has {\"ts\": 1888888888} and the tsFieldName is set to ts, then the value 1888888888 will be used when written to InfluxDB. Make sure the value is formatted according to the precision. If not set, the current timestamp will be used.", + "zh_CN": "若有设置,写入时的时间戳以该字段的值为准。例如,假设数据为 {\"ts\": 1888888888} 且 tsFieldName 属性设置为 ts,则 1888888888 将作为此条数据写入作为的时间戳。此时,需要确保时间戳的值的精度与 precision 的配置相同。 如果该属性未设置,则写入时采用当时的时间戳。" + }, + "label": { + "en_US": "Timestamp Field Name", + "zh_CN": "时间戳字段名" + } + }, + { + "name": "fields", + "default": [], + "optional": true, + "control": "list", + "type": "list_string", + "hint": { + "en_US": "Fields to be sent", + "zh_CN": "返回的数据字段。" + }, + "label": { + "en_US": "Fields", + "zh_CN": "Fields" + } + }, + { + "name": "tags", + "default": {}, + "optional": true, + "control": "list", + "type": "object", + "hint": { + "en_US": "The tags to write, the format is like {\"tag1\":\"value1\"}. 
The value can be dataTemplate format, like {\"tag1\":\"{{.temperature}}\"}", + "zh_CN": "标签键值对,其格式为 {\"tag1\":\"value1\"}。其中,值可为数据模板格式,例如 {\"tag1\":\"{{.temperature}}\"}" + }, + "label": { + "en_US": "Tags", + "zh_CN": "标签" + } + }, + { + "name": "dataTemplate", + "default": "", + "optional": true, + "control": "textarea", + "type": "string", + "hint": { + "en_US": "The golang template format string to specify the output data format. The input of the template is the sink message which is always an array of map. If no data template is specified, the raw input will be the data.", + "zh_CN": "Golang 模板格式字符串,用于指定输出数据格式。 模板的输入是目标消息,该消息始终是 map 数组。 如果未指定数据模板,则将数据作为原始输入。" + }, + "label": { + "en_US": "Data template", + "zh_CN": "数据模版" + } + } + ], + "node": { + "category": "sink", + "icon": "iconPath", + "label": { + "en": "InfluxDB 2", + "zh": "InfluxDB 2" + } + } +} diff --git a/go.mod b/go.mod index eb6fe135d5..67620a9c41 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,8 @@ require ( github.com/gorilla/handlers v1.5.2 github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket v1.5.1 + github.com/influxdata/influxdb-client-go/v2 v2.13.0 + github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d github.com/jhump/protoreflect v1.15.6 github.com/jinzhu/now v1.1.5 github.com/keepeye/logrus-filename v0.0.0-20190711075016-ce01a4391dd1 @@ -66,6 +68,7 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/aws/aws-sdk-go v1.38.20 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bufbuild/protocompile v0.9.0 // indirect @@ -96,6 +99,7 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huandu/xstrings v1.4.0 // indirect github.com/imdario/mergo v0.3.16 // indirect + github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect github.com/leodido/go-urn v1.4.0 // indirect @@ -109,6 +113,7 @@ require ( github.com/nats-io/nuid v1.0.1 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/nxadm/tail v1.4.11 // indirect + github.com/oapi-codegen/runtime v1.0.0 // indirect github.com/onsi/ginkgo v1.16.4 // indirect github.com/onsi/gomega v1.13.0 // indirect github.com/pingcap/errors v0.11.4 // indirect diff --git a/go.sum b/go.sum index 63e1e05253..4d965e8bfa 100644 --- a/go.sum +++ b/go.sum @@ -21,6 +21,7 @@ github.com/PaesslerAG/gval v1.2.2/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbV github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= github.com/PaesslerAG/jsonpath v0.1.1 h1:c1/AToHQMVsduPAa4Vh6xp2U0evy4t8SWp8imEsylIk= github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/Rookiecom/cpuprofile v1.0.2 h1:wXGOmpSyCfoi7D0OB79srblNSw5Ccn0fTy4c537yWy4= github.com/Rookiecom/cpuprofile v1.0.2/go.mod h1:JEBcjNHceN6/g4ppueDEqWlbtbMGMyLy0YlT8QnKFbE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -39,6 +40,8 @@ github.com/alicebob/miniredis/v2 v2.30.0 h1:uA3uhDbCxfO9+DI/DuGeAMr9qI+noVWwGPNT github.com/alicebob/miniredis/v2 v2.30.0/go.mod h1:84TWKZlxYkfgMucPBf5SOQBYJceZeQRFIaQgNMiCX6Q= 
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/apple/foundationdb/bindings/go v0.0.0-20240315202255-8943393e84fc h1:UDw1YBxVLq6vldL3ncJOtSgbnsG+2h4aHsDi9EFN/Bo= github.com/apple/foundationdb/bindings/go v0.0.0-20240315202255-8943393e84fc/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -57,6 +60,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -284,7 +288,12 @@ github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM= +github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d h1:/WZQPMZNsjZ7IlCpsLGdQBINg5bxKQ1K1sh6awxLtkA= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag= github.com/jhump/protoreflect v1.15.6 h1:WMYJbw2Wo+KOWwZFvgY0jMoVHM6i4XIvRs2RcBj5VmI= github.com/jhump/protoreflect v1.15.6/go.mod h1:jCHoyYQIJnaabEYnbGwyo9hUqfyUMTbJw/tAut5t97E= @@ -305,6 +314,7 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/keepeye/logrus-filename v0.0.0-20190711075016-ce01a4391dd1 h1:JL2rWnBX8jnbHHlLcLde3BBWs+jzqZvOmF+M3sXoNOE= 
github.com/keepeye/logrus-filename v0.0.0-20190711075016-ce01a4391dd1/go.mod h1:nNLjpEi4xVFB7358xLPpPscdvXP+pbhiHgSmjIur8z0= @@ -395,6 +405,8 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo= +github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -509,6 +521,7 @@ github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= diff --git a/internal/binder/io/ext_full.go b/internal/binder/io/ext_full.go index 367c956dd5..82de753dd2 100644 --- a/internal/binder/io/ext_full.go +++ b/internal/binder/io/ext_full.go @@ -19,6 +19,8 @@ package io import ( "github.com/lf-edge/ekuiper/contract/v2/api" "github.com/lf-edge/ekuiper/v2/extensions/impl/image" + "github.com/lf-edge/ekuiper/v2/extensions/impl/influx" + "github.com/lf-edge/ekuiper/v2/extensions/impl/influx2" sql2 "github.com/lf-edge/ekuiper/v2/extensions/impl/sql" "github.com/lf-edge/ekuiper/v2/extensions/impl/sql/client" "github.com/lf-edge/ekuiper/v2/extensions/impl/video" @@ -31,8 +33,8 @@ func init() { //modules.RegisterSource("kafka", func() api.Source { return kafkaSrc.GetSource() }) //modules.RegisterLookupSource("sql", func() api.LookupSource { return sql.GetLookup() }) modules.RegisterSink("image", func() api.Sink { return image.GetSink() }) - //modules.RegisterSink("influx", func() api.Sink { return influx.GetSink() }) - //modules.RegisterSink("influx2", func() api.Sink { return influx2.GetSink() }) + modules.RegisterSink("influx", func() api.Sink { return influx.GetSink() }) + modules.RegisterSink("influx2", func() api.Sink { return influx2.GetSink() }) //modules.RegisterSink("kafka", func() api.Sink { return kafka.GetSink() }) modules.RegisterSource("sql", sql2.GetSource) modules.RegisterSink("sql", sql2.GetSink)
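Note on the "tags" property documented in influx.json and influx2.json above: its values may be given in dataTemplate form, e.g. {"tag1":"{{.temperature}}"}. As a rough illustration of how such a template value resolves against a single message, here is a minimal, self-contained Go sketch; it uses the standard text/template package and a made-up sample message, and is not the sinks' actual code path:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// Sample message as a rule might emit it; the field names are hypothetical.
	msg := map[string]any{"temperature": 30, "humidity": 65}

	// A tag value in dataTemplate form, as documented for the "tags" property.
	tagTpl := "{{.temperature}}"

	tpl, err := template.New("tag").Parse(tagTpl)
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := tpl.Execute(&buf, msg); err != nil {
		panic(err)
	}
	// With the sample message, the tag "tag1" resolves to "30".
	fmt.Printf("tag1=%s\n", buf.String())
}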
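The tsFieldName and precision hints above state that, when tsFieldName is set, the numeric value of that field is written as the point timestamp and must already match the configured precision (s, ms, us or ns). A small Go sketch of that interpretation, assuming the field arrives as an int64; the toTime helper is hypothetical and not part of either plugin:

package main

import (
	"fmt"
	"time"
)

// toTime interprets a raw timestamp value according to the configured
// precision, mirroring the rule described in the tsFieldName/precision hints.
func toTime(ts int64, precision string) (time.Time, error) {
	switch precision {
	case "s":
		return time.Unix(ts, 0), nil
	case "ms":
		return time.UnixMilli(ts), nil
	case "us":
		return time.UnixMicro(ts), nil
	case "ns":
		return time.Unix(0, ts), nil
	default:
		return time.Time{}, fmt.Errorf("unsupported precision %q", precision)
	}
}

func main() {
	// Example from the hint: data {"ts": 1888888888} with tsFieldName set to "ts".
	// With precision "s" this lands in 2029; with "ms" the same value is early 1970,
	// which is why the value must be produced in the precision the sink is configured with.
	for _, p := range []string{"s", "ms"} {
		t, err := toTime(1888888888, p)
		if err != nil {
			panic(err)
		}
		fmt.Printf("precision=%s -> %s\n", p, t.UTC().Format(time.RFC3339Nano))
	}
}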