feat(时序数据库集成): tdengine\influxdb\iotdb数据库集成

This commit is contained in:
gx_ma
2025-12-17 14:57:18 +08:00
parent 8501632079
commit e0e9f19d85
58 changed files with 6078 additions and 154 deletions

View File

@@ -0,0 +1,70 @@
version: '2'

networks:
  network:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 177.7.0.0/16

services:
  # TDengine time-series database.
  tdengine:
    image: 'tdengine/tdengine:3.3.5.8'
    container_name: tdengine
    privileged: true
    hostname: fastbee
    ports:
      # Port mappings are quoted strings: an unquoted "a:b" scalar is a
      # documented YAML/Compose retyping pitfall (sexagesimal ints in YAML 1.1).
      - "6030:6030"
      - "6041:6041"
      - "6043-6049:6043-6049"
      - "6043-6049:6043-6049/udp"
    volumes:
      - /var/data/tdengine/log:/var/log/taos
      - /var/data/tdengine/data:/var/lib/taos
      - /var/data/tdengine/conf:/etc/taos
      - /etc/localtime:/etc/localtime
    environment:
      TZ: Asia/Shanghai
    networks:
      network:
        ipv4_address: 177.7.0.30

  # InfluxDB 2.x with first-run auto-setup (DOCKER_INFLUXDB_INIT_* only apply
  # when the data volume is empty).
  influxdb:
    image: influxdb:2.7.5
    container_name: influxdb
    ports:
      - "8086:8086"
    volumes:
      - ./influxdb2:/var/lib/influxdb2
    environment:
      DOCKER_INFLUXDB_INIT_MODE: "setup"
      DOCKER_INFLUXDB_INIT_USERNAME: "admin"
      # NOTE(review): credentials and the admin token are committed in plain
      # text — consider moving them to an env file or secret store.
      DOCKER_INFLUXDB_INIT_PASSWORD: "admin123"
      DOCKER_INFLUXDB_INIT_ORG: "fastbee"
      DOCKER_INFLUXDB_INIT_BUCKET: "device_log"
      DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: "inX0k-IPfSgKg6AIfoZm6Mv0DQyQOKCkfvs5ZF3a836Yzx2Ew9QgxsHev40_2gztuMn6tofwyS6nfbT4cD-SeA=="
    networks:
      network:
        ipv4_address: 177.7.0.31

  # Apache IoTDB standalone image (ConfigNode + DataNode in one container).
  iotdb:
    image: apache/iotdb:1.3.3-standalone
    hostname: iotdb
    container_name: iotdb
    restart: always
    ports:
      - "6667:6667"
      - "5555:5555"
      - "8070:8070"
      - "9003:9003"
    privileged: true
    volumes:
      - /var/data/iotdb/data:/iotdb/data
      - /var/data/iotdb/logs:/iotdb/logs
      - /var/data/iotdb/conf:/iotdb/conf
    environment:
      TZ: Asia/Shanghai
    networks:
      network:
        ipv4_address: 177.7.0.32

View File

@@ -63,6 +63,7 @@ services:
- redis
- mysql
- zlmedia
- tdengine
volumes:
- /var/data/java/fastbee-admin.jar:/server.jar
- /var/data/java/uploadPath:/uploadPath
@@ -117,5 +118,25 @@ services:
network:
ipv4_address: 177.7.0.15
# tdengine:
# image: 'tdengine/tdengine:3.3.5.8'
# container_name: tdengine
# privileged: true
# hostname: fastbee
# ports:
# - 6030:6030
# - 6041:6041
# - 6043-6049:6043-6049
# - 6043-6049:6043-6049/udp
# volumes:
# - /var/data/tdengine/log:/var/log/taos
# - /var/data/tdengine/data:/var/lib/taos
# - /var/data/tdengine/conf:/etc/taos
# - /etc/localtime:/etc/localtime
# environment:
# TZ: Asia/Shanghai
# networks:
# network:
# ipv4_address: 177.7.0.16

View File

@@ -0,0 +1,156 @@
@REM
@REM Licensed to the Apache Software Foundation (ASF) under one
@REM or more contributor license agreements. See the NOTICE file
@REM distributed with this work for additional information
@REM regarding copyright ownership. The ASF licenses this file
@REM to you under the Apache License, Version 2.0 (the
@REM "License"); you may not use this file except in compliance
@REM with the License. You may obtain a copy of the License at
@REM
@REM http://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing,
@REM software distributed under the License is distributed on an
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@REM KIND, either express or implied. See the License for the
@REM specific language governing permissions and limitations
@REM under the License.
@REM
@echo off
@REM You can set datanode memory size, example '2G' or '2048M'
set MEMORY_SIZE=
@REM true or false
@REM DO NOT FORGET TO MODIFY THE PASSWORD FOR SECURITY (%CONFIGNODE_CONF%\jmx.password and %CONFIGNODE_CONF%\jmx.access)
set JMX_LOCAL="true"
set JMX_PORT="32000"
@REM only take effect when the jmx_local=false
@REM You need to change this IP as a public IP if you want to remotely connect IoTDB ConfigNode by JMX.
@REM 0.0.0.0 is not allowed
set JMX_IP="127.0.0.1"
if %JMX_LOCAL% == "false" (
    echo "setting remote JMX..."
    @REM you may have no permission to run chmod. If so, contact your system administrator.
    set CONFIGNODE_JMX_OPTS=-Dcom.sun.management.jmxremote^
     -Dcom.sun.management.jmxremote.port=%JMX_PORT%^
     -Dcom.sun.management.jmxremote.rmi.port=%JMX_PORT%^
     -Djava.rmi.server.randomIDs=true^
     -Dcom.sun.management.jmxremote.ssl=false^
     -Dcom.sun.management.jmxremote.authenticate=false^
     -Dcom.sun.management.jmxremote.password.file="%CONFIGNODE_CONF%\jmx.password"^
     -Dcom.sun.management.jmxremote.access.file="%CONFIGNODE_CONF%\jmx.access"^
     -Djava.rmi.server.hostname=%JMX_IP%
) else (
    echo "setting local JMX..."
)
set CONFIGNODE_JMX_OPTS=%CONFIGNODE_JMX_OPTS% -Diotdb.jmx.local=%JMX_LOCAL%
@REM Detect CPU core count via WMIC; sanity-floored to 1 below.
for /f %%b in ('wmic cpu get numberofcores ^| findstr "[0-9]"') do (
    set system_cpu_cores=%%b
)
if %system_cpu_cores% LSS 1 set system_cpu_cores=1
@REM Total physical memory in bytes, via WMIC.
for /f %%b in ('wmic ComputerSystem get TotalPhysicalMemory ^| findstr "[0-9]"') do (
    set system_memory=%%b
)
@REM Convert bytes to MB with a throwaway VBScript: cmd's 'set /a' is 32-bit
@REM signed and would overflow on machines with more than ~2 GB of RAM.
echo wsh.echo FormatNumber(cdbl(%system_memory%)/(1024*1024), 0) > "%CONFIGNODE_HOME%\sbin\tmp.vbs"
for /f "tokens=*" %%a in ('cscript //nologo "%CONFIGNODE_HOME%\sbin\tmp.vbs"') do set system_memory_in_mb=%%a
del "%CONFIGNODE_HOME%\sbin\tmp.vbs"
@REM Strip the thousands separators that FormatNumber inserts.
set system_memory_in_mb=%system_memory_in_mb:,=%
@REM suggest using memory, system memory 3 / 10
set /a suggest_=%system_memory_in_mb%/10*3
@REM Resolve memory_size_in_mb from MEMORY_SIZE ('NNNNM' or 'NG') or the suggestion.
if "%MEMORY_SIZE%"=="" (
    set /a memory_size_in_mb=%suggest_%
) else (
    if "%MEMORY_SIZE:~-1%"=="M" (
        set /a memory_size_in_mb=%MEMORY_SIZE:~0,-1%
    ) else if "%MEMORY_SIZE:~-1%"=="G" (
        set /a memory_size_in_mb=%MEMORY_SIZE:~0,-1%*1024
    ) else (
        echo "Invalid format of MEMORY_SIZE, please use the format like 2048M or 2G."
        exit /b 1
    )
)
@REM set on heap memory size
@REM when memory_size_in_mb is less than 4 * 1024, we will set on heap memory size to memory_size_in_mb / 4 * 3
@REM when memory_size_in_mb is greater than 4 * 1024 and less than 16 * 1024, we will set on heap memory size to memory_size_in_mb / 5 * 4
@REM when memory_size_in_mb is greater than 16 * 1024 and less than 128 * 1024, we will set on heap memory size to memory_size_in_mb / 8 * 7
@REM when memory_size_in_mb is greater than 128 * 1024, we will set on heap memory size to memory_size_in_mb - 16 * 1024
if %memory_size_in_mb% LSS 4096 (
    set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/4*3
) else if %memory_size_in_mb% LSS 16384 (
    set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/5*4
) else if %memory_size_in_mb% LSS 131072 (
    set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/8*7
) else (
    set /a on_heap_memory_size_in_mb=%memory_size_in_mb%-16384
)
set /a off_heap_memory_size_in_mb=%memory_size_in_mb%-%on_heap_memory_size_in_mb%
set ON_HEAP_MEMORY=%on_heap_memory_size_in_mb%M
set OFF_HEAP_MEMORY=%off_heap_memory_size_in_mb%M
set IOTDB_ALLOW_HEAP_DUMP="true"
@REM on heap memory size
@REM set ON_HEAP_MEMORY=2G
@REM off heap memory size
@REM set OFF_HEAP_MEMORY=512M
@REM Re-derive the MB figure so a manual OFF_HEAP_MEMORY override above is honored.
if "%OFF_HEAP_MEMORY:~-1%"=="M" (
    set /a off_heap_memory_size_in_mb=%OFF_HEAP_MEMORY:~0,-1%
) else if "%OFF_HEAP_MEMORY:~-1%"=="G" (
    set /a off_heap_memory_size_in_mb=%OFF_HEAP_MEMORY:~0,-1%*1024
)
@REM threads number of io
set IO_THREADS_NUMBER=100
@REM Max cached buffer size, Note: unit can only be B!
@REM which equals OFF_HEAP_MEMORY / IO_THREADS_NUMBER
set /a MAX_CACHED_BUFFER_SIZE=%off_heap_memory_size_in_mb%/%IO_THREADS_NUMBER%*1024*1024
set CONFIGNODE_HEAP_OPTS=-Xmx%ON_HEAP_MEMORY% -Xms%ON_HEAP_MEMORY%
set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -XX:MaxDirectMemorySize=%OFF_HEAP_MEMORY%
set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -Djdk.nio.maxCachedBufferSize=%MAX_CACHED_BUFFER_SIZE%
@REM NOTE(review): this appends to IOTDB_HEAP_OPTS while every other option in
@REM this ConfigNode script goes to CONFIGNODE_HEAP_OPTS — confirm whether
@REM -XX:+CrashOnOutOfMemoryError was meant for CONFIGNODE_HEAP_OPTS instead.
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+CrashOnOutOfMemoryError
@REM if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance
@REM IOTDB_JMX_OPTS=%IOTDB_HEAP_OPTS% -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=\tmp\confignode_heapdump.hprof
@REM You can put your env variable here
@REM set JAVA_HOME=%JAVA_HOME%
@REM set gc log.
IF "%1" equ "printgc" (
    IF "%JAVA_VERSION%" == "8" (
        md "%CONFIGNODE_HOME%\logs"
        set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -Xloggc:"%CONFIGNODE_HOME%\logs\gc.log" -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M
    ) ELSE (
        md "%CONFIGNODE_HOME%\logs"
        set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -Xlog:gc=info,heap*=trace,age*=debug,safepoint=info,promotion*=trace:file="%CONFIGNODE_HOME%\logs\gc.log":time,uptime,pid,tid,level:filecount=10,filesize=10485760
    )
)
@REM Add args for Java 11 and above, due to [JEP 396: Strongly Encapsulate JDK Internals by Default] (https://openjdk.java.net/jeps/396)
IF "%JAVA_VERSION%" == "8" (
    set ILLEGAL_ACCESS_PARAMS=
) ELSE (
    set ILLEGAL_ACCESS_PARAMS=--add-opens=java.base/java.util.concurrent=ALL-UNNAMED^
     --add-opens=java.base/java.lang=ALL-UNNAMED^
     --add-opens=java.base/java.util=ALL-UNNAMED^
     --add-opens=java.base/java.nio=ALL-UNNAMED^
     --add-opens=java.base/java.io=ALL-UNNAMED^
     --add-opens=java.base/java.net=ALL-UNNAMED
)
echo ConfigNode on heap memory size = %ON_HEAP_MEMORY%B, off heap memory size = %OFF_HEAP_MEMORY%B
echo If you want to change this configuration, please check conf/confignode-env.bat.

View File

@@ -0,0 +1,314 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# You can set ConfigNode memory size, example '2G' or '2048M'
MEMORY_SIZE=
# You can put your env variable here
# export JAVA_HOME=$JAVA_HOME
# Set max number of open files
max_num=$(ulimit -n)
if [ $max_num -le 65535 ]; then
    ulimit -n 65535
    if [ $? -ne 0 ]; then
        echo "Warning: Failed to set max number of files to be 65535, maybe you need to use 'sudo ulimit -n 65535' to set it when you use iotdb ConfigNode in production environments."
    fi
fi
# Set somaxconn to a better value to avoid meaningless connection reset issues when the system is under high load.
# The original somaxconn will be set back when the system reboots.
# For more detail, see: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=19f92a030ca6d772ab44b22ee6a01378a8cb32d4
SOMAXCONN=65535
# This block only WARNS when the kernel backlog is small; it never changes the setting itself.
case "$(uname)" in
    Linux)
        somaxconn=$(sysctl -n net.core.somaxconn)
        if [ "$somaxconn" -lt $SOMAXCONN ]; then
            echo "WARN:"
            echo "WARN: the value of net.core.somaxconn (=$somaxconn) is too small, please set it to a larger value using the following command."
            echo "WARN: sudo sysctl -w net.core.somaxconn=$SOMAXCONN"
            echo "WARN: The original net.core.somaxconn value will be set back when the os reboots."
            echo "WARN:"
        fi
        ;;
    FreeBSD | Darwin)
        somaxconn=$(sysctl -n kern.ipc.somaxconn)
        if [ "$somaxconn" -lt $SOMAXCONN ]; then
            echo "WARN:"
            echo "WARN: the value of kern.ipc.somaxconn (=$somaxconn) is too small, please set it to a larger value using the following command."
            echo "WARN: sudo sysctl -w kern.ipc.somaxconn=$SOMAXCONN"
            echo "WARN: The original kern.ipc.somaxconn value will be set back when the os reboots."
            echo "WARN:"
        fi
        ;;
esac
# whether we allow enable heap dump files
IOTDB_ALLOW_HEAP_DUMP="true"
# Compute ON_HEAP_MEMORY / OFF_HEAP_MEMORY for the ConfigNode JVM.
# Detects physical memory and CPU cores per OS, then sizes the total memory
# either from the user-provided MEMORY_SIZE ('2G' / '2048M') or from a
# suggested 3/10 share of system memory (capped at 8 GB), and finally splits
# it into on-heap and off-heap portions by the tiers documented inline.
calculate_memory_sizes()
{
    case "$(uname)" in
        Linux)
            system_memory_in_mb=$(free -m | sed -n '2p' | awk '{print $2}')
            system_cpu_cores=$(egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo)
            ;;
        FreeBSD)
            system_memory_in_bytes=$(sysctl hw.physmem | awk '{print $2}')
            system_memory_in_mb=$(( system_memory_in_bytes / 1024 / 1024 ))
            system_cpu_cores=$(sysctl hw.ncpu | awk '{print $2}')
            ;;
        SunOS)
            system_memory_in_mb=$(prtconf | awk '/Memory size:/ {print $3}')
            system_cpu_cores=$(psrinfo | wc -l)
            ;;
        Darwin)
            system_memory_in_bytes=$(sysctl hw.memsize | awk '{print $2}')
            system_memory_in_mb=$(( system_memory_in_bytes / 1024 / 1024 ))
            system_cpu_cores=$(sysctl hw.ncpu | awk '{print $2}')
            ;;
        *)
            # assume reasonable defaults for e.g. a modern desktop or
            # cheap server
            system_memory_in_mb="2048"
            system_cpu_cores="2"
            ;;
    esac

    # some systems like the raspberry pi don't report cores, use at least 1
    if [ "$system_cpu_cores" -lt "1" ]; then
        system_cpu_cores="1"
    fi

    # suggest using memory, system memory 3 / 10
    suggest_using_memory_in_mb=$(( system_memory_in_mb / 10 * 3 ))

    if [ -n "$MEMORY_SIZE" ]; then
        # accept only a 'G' or 'M' suffix; anything else is rejected
        if [ "${MEMORY_SIZE%G}" != "$MEMORY_SIZE" ] || [ "${MEMORY_SIZE%M}" != "$MEMORY_SIZE" ]; then
            if [ "${MEMORY_SIZE%G}" != "$MEMORY_SIZE" ]; then
                memory_size_in_mb=$(( ${MEMORY_SIZE%G} * 1024 ))
            else
                memory_size_in_mb=$(( ${MEMORY_SIZE%M} ))
            fi
        else
            echo "Invalid format of MEMORY_SIZE, please use the format like 2048M or 2G"
            exit 1
        fi
    else
        # set memory size to suggest using memory, if suggest using memory is greater than 8GB, set memory size to 8GB
        if [ "$suggest_using_memory_in_mb" -gt "8192" ]; then
            memory_size_in_mb="8192"
        else
            memory_size_in_mb=$suggest_using_memory_in_mb
        fi
    fi

    # on-heap tiers: <4G -> 3/4 on heap; <16G -> 4/5; <128G -> 7/8;
    # otherwise everything except 16G stays on heap
    if [ "$memory_size_in_mb" -lt "4096" ]; then
        on_heap_memory_size_in_mb=$(( memory_size_in_mb / 4 * 3 ))
    elif [ "$memory_size_in_mb" -lt "16384" ]; then
        on_heap_memory_size_in_mb=$(( memory_size_in_mb / 5 * 4 ))
    elif [ "$memory_size_in_mb" -lt "131072" ]; then
        on_heap_memory_size_in_mb=$(( memory_size_in_mb / 8 * 7 ))
    else
        on_heap_memory_size_in_mb=$(( memory_size_in_mb - 16384 ))
    fi
    off_heap_memory_size_in_mb=$(( memory_size_in_mb - on_heap_memory_size_in_mb ))
    ON_HEAP_MEMORY="${on_heap_memory_size_in_mb}M"
    OFF_HEAP_MEMORY="${off_heap_memory_size_in_mb}M"
}
CONFIGNODE_CONF_DIR="`dirname "$0"`"
# Read the cn_system_dir property from the given file under CONFIGNODE_CONF_DIR.
# Echoes "" when the property is absent, the value itself when it is an
# absolute path, or a path resolved relative to the conf dir's parent.
get_cn_system_dir() {
    local config_file="$1"
    local cn_system_dir=""
    # the last assignment of cn_system_dir in the file wins (tail -n 1)
    cn_system_dir=`sed '/^cn_system_dir=/!d;s/.*=//' ${CONFIGNODE_CONF_DIR}/${config_file} | tail -n 1`
    if [ -z "$cn_system_dir" ]; then
        echo ""
        return 0
    fi
    if [[ "$cn_system_dir" == /* ]]; then
        echo "$cn_system_dir"
    else
        echo "$CONFIGNODE_CONF_DIR/../$cn_system_dir"
    fi
}
# Resolve heap_dump_dir: prefer iotdb-system.properties when present
# (presumably the merged config layout — confirm), else the confignode file,
# else a default under ../data; create the directory if missing.
if [ -f "${CONFIGNODE_CONF_DIR}/iotdb-system.properties" ]; then
    heap_dump_dir=$(get_cn_system_dir "iotdb-system.properties")
else
    heap_dump_dir=$(get_cn_system_dir "iotdb-confignode.properties")
fi
if [ -z "$heap_dump_dir" ]; then
    heap_dump_dir="$(dirname "$0")/../data/confignode/system"
fi
if [ ! -d "$heap_dump_dir" ]; then
    mkdir -p "$heap_dump_dir"
fi
# find java in JAVA_HOME (prefer the amd64 launcher when present),
# otherwise fall back to 'java' from PATH
if [ -n "$JAVA_HOME" ]; then
    for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
        if [ -x "$java" ]; then
            JAVA="$java"
            break
        fi
    done
else
    JAVA=java
fi
# Fix: quote "$JAVA" — the unquoted test only worked by accident when JAVA
# was unset and breaks on paths containing spaces.
if [ -z "$JAVA" ] ; then
    echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. >&2
    exit 1;
fi
# Determine the sort of JVM we'll be running on.
# jvmver looks like "1.8.0_144" (Java 8) or "11.0.2" (Java 9+);
# JVM_VERSION strips the "_NN" update suffix, JVM_PATCH_VERSION keeps it.
java_ver_output=`"$JAVA" -version 2>&1`
jvmver=`echo "$java_ver_output" | grep '[openjdk|java] version' | awk -F'"' 'NR==1 {print $2}' | cut -d\- -f1`
JVM_VERSION=${jvmver%_*}
JVM_PATCH_VERSION=${jvmver#*_}
if [ "$JVM_VERSION" \< "1.8" ] ; then
    echo "IoTDB requires Java 8u92 or later."
    exit 1;
fi
# Fix: the second test originally repeated the '< 1.8' comparison, which is
# unreachable after the exit above; the intent is to reject Java 8 builds
# below update 92 (JVM_VERSION is "1.8.0" for every Java 8 release).
if [ "$JVM_VERSION" = "1.8.0" ] && [ "$JVM_PATCH_VERSION" -lt 92 ] ; then
    echo "IoTDB requires Java 8u92 or later."
    exit 1;
fi
# Split JVM_VERSION on dots: element 0 is "1" for Java 8 ("1.8.0"),
# or the major version for Java 9+ ("11.0.2" -> "11").
version_arr=(${JVM_VERSION//./ })
illegal_access_params=""
#GC log path has to be defined here because it needs to access CONFIGNODE_HOME
if [ "${version_arr[0]}" = "1" ] ; then
    # Java 8
    MAJOR_VERSION=${version_arr[1]}
    echo "$CONFIGNODE_JMX_OPTS" | grep -q "^-[X]loggc"
    if [ "$?" = "1" ] ; then # [X] to prevent ccm from replacing this line
        # only add -Xlog:gc if it's not mentioned in jvm-server.options file
        mkdir -p ${CONFIGNODE_HOME}/logs
        if [ "$#" -ge "1" -a "$1" == "printgc" ]; then
            CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Xloggc:${CONFIGNODE_HOME}/logs/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M"
        fi
    fi
else
    #JDK 11 and others
    MAJOR_VERSION=${version_arr[0]}
    # See description of https://bugs.openjdk.java.net/browse/JDK-8046148 for details about the syntax
    # The following is the equivalent to -XX:+PrintGCDetails -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M
    echo "$CONFIGNODE_JMX_OPTS" | grep -q "^-[X]log:gc"
    if [ "$?" = "1" ] ; then # [X] to prevent ccm from replacing this line
        # only add -Xlog:gc if it's not mentioned in jvm-server.options file
        mkdir -p ${CONFIGNODE_HOME}/logs
        if [ "$#" -ge "1" -a "$1" == "printgc" ]; then
            CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Xlog:gc=info,heap*=info,age*=info,safepoint=info,promotion*=info:file=${CONFIGNODE_HOME}/logs/gc.log:time,uptime,pid,tid,level:filecount=10,filesize=10485760"
        fi
    fi
    # Add argLine for Java 11 and above, due to [JEP 396: Strongly Encapsulate JDK Internals by Default] (https://openjdk.java.net/jeps/396)
    illegal_access_params="$illegal_access_params --add-opens=java.base/java.util.concurrent=ALL-UNNAMED"
    illegal_access_params="$illegal_access_params --add-opens=java.base/java.lang=ALL-UNNAMED"
    illegal_access_params="$illegal_access_params --add-opens=java.base/java.util=ALL-UNNAMED"
    illegal_access_params="$illegal_access_params --add-opens=java.base/java.nio=ALL-UNNAMED"
    illegal_access_params="$illegal_access_params --add-opens=java.base/java.io=ALL-UNNAMED"
    illegal_access_params="$illegal_access_params --add-opens=java.base/java.net=ALL-UNNAMED"
fi
# Sets ON_HEAP_MEMORY / OFF_HEAP_MEMORY (defined earlier in this file).
calculate_memory_sizes
# on heap memory size
#ON_HEAP_MEMORY="2G"
# off heap memory size
#OFF_HEAP_MEMORY="512M"
# Convert OFF_HEAP_MEMORY back to MB so a manual 'G'/'M' override above is honored.
if [ "${OFF_HEAP_MEMORY%"G"}" != "$OFF_HEAP_MEMORY" ]
then
    off_heap_memory_size_in_mb=`expr ${OFF_HEAP_MEMORY%"G"} "*" 1024`
else
    off_heap_memory_size_in_mb=`expr ${OFF_HEAP_MEMORY%"M"}`
fi
# threads number of io
IO_THREADS_NUMBER="100"
# Max cached buffer size, Note: unit can only be B!
# which equals OFF_HEAP_MEMORY / IO_THREADS_NUMBER
MAX_CACHED_BUFFER_SIZE=`expr $off_heap_memory_size_in_mb \* 1024 \* 1024 / $IO_THREADS_NUMBER`
#true or false
#DO NOT FORGET TO MODIFY THE PASSWORD FOR SECURITY (${CONFIGNODE_CONF}/jmx.password and ${CONFIGNODE_CONF}/jmx.access)
#If you want to connect JMX Service by network in local machine, such as nodeTool.sh will try to connect 127.0.0.1:31999, please set JMX_LOCAL to false.
JMX_LOCAL="true"
JMX_PORT="32000"
#only take effect when the jmx_local=false
#You need to change this IP as a public IP if you want to remotely connect IoTDB ConfigNode by JMX.
# 0.0.0.0 is not allowed
JMX_IP="127.0.0.1"
if [ ${JMX_LOCAL} = "false" ]; then
    echo "setting remote JMX..."
    #you may have no permission to run chmod. If so, contact your system administrator.
    chmod 600 ${CONFIGNODE_CONF}/jmx.password
    chmod 600 ${CONFIGNODE_CONF}/jmx.access
    CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote"
    CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT"
    CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
    CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Djava.rmi.server.randomIDs=true"
    CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.ssl=false"
    CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.authenticate=true"
    CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.password.file=${CONFIGNODE_CONF}/jmx.password"
    CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.access.file=${CONFIGNODE_CONF}/jmx.access"
    CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Djava.rmi.server.hostname=$JMX_IP"
else
    echo "setting local JMX..."
fi
CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Diotdb.jmx.local=$JMX_LOCAL"
CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Xms${ON_HEAP_MEMORY}"
CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Xmx${ON_HEAP_MEMORY}"
CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -XX:MaxDirectMemorySize=${OFF_HEAP_MEMORY}"
CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Djdk.nio.maxCachedBufferSize=${MAX_CACHED_BUFFER_SIZE}"
# NOTE(review): this appends to IOTDB_JMX_OPTS while the rest of this ConfigNode
# script builds CONFIGNODE_JMX_OPTS — confirm which variable the launcher reads.
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+CrashOnOutOfMemoryError"
# if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance
#IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${heap_dump_dir}/confignode_heapdump.hprof"
echo "ConfigNode on heap memory size = ${ON_HEAP_MEMORY}B, off heap memory size = ${OFF_HEAP_MEMORY}B"
echo "If you want to change this configuration, please check conf/confignode-env.sh."

View File

@@ -0,0 +1,187 @@
@REM
@REM Licensed to the Apache Software Foundation (ASF) under one
@REM or more contributor license agreements. See the NOTICE file
@REM distributed with this work for additional information
@REM regarding copyright ownership. The ASF licenses this file
@REM to you under the Apache License, Version 2.0 (the
@REM "License"); you may not use this file except in compliance
@REM with the License. You may obtain a copy of the License at
@REM
@REM http://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing,
@REM software distributed under the License is distributed on an
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@REM KIND, either express or implied. See the License for the
@REM specific language governing permissions and limitations
@REM under the License.
@REM
@echo off
@REM You can set datanode memory size, example '2G' or '2048M'
set MEMORY_SIZE=
@REM true or false
@REM DO NOT FORGET TO MODIFY THE PASSWORD FOR SECURITY (%IOTDB_CONF%\jmx.password and %IOTDB_CONF%\jmx.access)
set JMX_LOCAL="true"
set JMX_PORT="31999"
@REM only take effect when the jmx_local=false
@REM You need to change this IP as a public IP if you want to remotely connect IoTDB by JMX.
@REM 0.0.0.0 is not allowed
set JMX_IP="127.0.0.1"
if %JMX_LOCAL% == "false" (
    echo "setting remote JMX..."
    @REM you may have no permission to run chmod. If so, contact your system administrator.
    @REM Fix: the closing quote of the access-file path was misplaced
    @REM ('jmx.acces"s'), which corrupted the -Dcom.sun.management.jmxremote.access.file
    @REM value whenever remote JMX was enabled.
    set IOTDB_JMX_OPTS=-Dcom.sun.management.jmxremote^
     -Dcom.sun.management.jmxremote.port=%JMX_PORT%^
     -Dcom.sun.management.jmxremote.rmi.port=%JMX_PORT%^
     -Djava.rmi.server.randomIDs=true^
     -Dcom.sun.management.jmxremote.ssl=false^
     -Dcom.sun.management.jmxremote.authenticate=false^
     -Dcom.sun.management.jmxremote.password.file="%IOTDB_CONF%\jmx.password"^
     -Dcom.sun.management.jmxremote.access.file="%IOTDB_CONF%\jmx.access"^
     -Djava.rmi.server.hostname=%JMX_IP%
) else (
    echo "setting local JMX..."
)
set IOTDB_JMX_OPTS=%IOTDB_JMX_OPTS% -Diotdb.jmx.local=%JMX_LOCAL%
@REM Detect CPU core count via WMIC; sanity-floored to 1 below.
for /f %%b in ('wmic cpu get numberofcores ^| findstr "[0-9]"') do (
    set system_cpu_cores=%%b
)
if %system_cpu_cores% LSS 1 set system_cpu_cores=1
@REM Total physical memory in bytes, via WMIC.
for /f %%b in ('wmic ComputerSystem get TotalPhysicalMemory ^| findstr "[0-9]"') do (
    set system_memory=%%b
)
@REM Convert bytes to MB with a throwaway VBScript: cmd's 'set /a' is 32-bit
@REM signed and would overflow on machines with more than ~2 GB of RAM.
echo wsh.echo FormatNumber(cdbl(%system_memory%)/(1024*1024), 0) > "%IOTDB_HOME%\sbin\tmp.vbs"
for /f "tokens=*" %%a in ('cscript //nologo "%IOTDB_HOME%\sbin\tmp.vbs"') do set system_memory_in_mb=%%a
del "%IOTDB_HOME%\sbin\tmp.vbs"
@REM Strip the thousands separators that FormatNumber inserts.
set system_memory_in_mb=%system_memory_in_mb:,=%
@REM DataNode suggestion is half of physical memory (ConfigNode uses 3/10).
set /a suggest_=%system_memory_in_mb%/2
if "%MEMORY_SIZE%"=="" (
    set /a memory_size_in_mb=%suggest_%
) else (
    if "%MEMORY_SIZE:~-1%"=="M" (
        set /a memory_size_in_mb=%MEMORY_SIZE:~0,-1%
    ) else if "%MEMORY_SIZE:~-1%"=="G" (
        set /a memory_size_in_mb=%MEMORY_SIZE:~0,-1%*1024
    ) else (
        echo "Invalid format of MEMORY_SIZE, please use the format like 2048M or 2G."
        exit /b 1
    )
)
@REM set on heap memory size
@REM when memory_size_in_mb is less than 4 * 1024, we will set on heap memory size to memory_size_in_mb / 4 * 3
@REM when memory_size_in_mb is greater than 4 * 1024 and less than 16 * 1024, we will set on heap memory size to memory_size_in_mb / 5 * 4
@REM when memory_size_in_mb is greater than 16 * 1024 and less than 128 * 1024, we will set on heap memory size to memory_size_in_mb / 8 * 7
@REM when memory_size_in_mb is greater than 128 * 1024, we will set on heap memory size to memory_size_in_mb - 16 * 1024
if %memory_size_in_mb% LSS 4096 (
    set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/4*3
) else if %memory_size_in_mb% LSS 16384 (
    set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/5*4
) else if %memory_size_in_mb% LSS 131072 (
    set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/8*7
) else (
    set /a on_heap_memory_size_in_mb=%memory_size_in_mb%-16384
)
set /a off_heap_memory_size_in_mb=%memory_size_in_mb%-%on_heap_memory_size_in_mb%
set ON_HEAP_MEMORY=%on_heap_memory_size_in_mb%M
set OFF_HEAP_MEMORY=%off_heap_memory_size_in_mb%M
set IOTDB_ALLOW_HEAP_DUMP="true"
@REM on heap memory size
@REM set ON_HEAP_MEMORY=2G
@REM off heap memory size
@REM set OFF_HEAP_MEMORY=512M
@REM Re-derive the MB figure so a manual OFF_HEAP_MEMORY override above is honored.
if "%OFF_HEAP_MEMORY:~-1%"=="M" (
    set /a off_heap_memory_size_in_mb=%OFF_HEAP_MEMORY:~0,-1%
) else if "%OFF_HEAP_MEMORY:~-1%"=="G" (
    set /a off_heap_memory_size_in_mb=%OFF_HEAP_MEMORY:~0,-1%*1024
)
@REM threads number of io
set IO_THREADS_NUMBER=1000
@REM Max cached buffer size, Note: unit can only be B!
@REM which equals OFF_HEAP_MEMORY / IO_THREADS_NUMBER
set /a MAX_CACHED_BUFFER_SIZE=%off_heap_memory_size_in_mb%/%IO_THREADS_NUMBER%*1024*1024
set IOTDB_HEAP_OPTS=-Xmx%ON_HEAP_MEMORY% -Xms%ON_HEAP_MEMORY%
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:MaxDirectMemorySize=%OFF_HEAP_MEMORY%
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -Djdk.nio.maxCachedBufferSize=%MAX_CACHED_BUFFER_SIZE%
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+CrashOnOutOfMemoryError
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+UseAdaptiveSizePolicy
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -Xss512k
@REM options below try to optimize safepoint stw time.
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+UnlockDiagnosticVMOptions
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:GuaranteedSafepointInterval=0
@REM these two options print safepoints with pauses longer than 1000ms to the standard output. You can see these logs via redirection when starting in the background like "start-datanode.sh > log_datanode_safepoint.txt"
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:SafepointTimeoutDelay=1000
set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+SafepointTimeout
@REM option below tries to optimize safepoint stw time for large counted loop.
@REM NOTE: it may have an impact on JIT's black-box optimization.
@REM set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+UseCountedLoopSafepoints
@REM When the GC time is too long, if there are remaining CPU resources, you can try to turn on and increase options below.
@REM for /F "tokens=2 delims==" %%I in ('wmic cpu get NumberOfCores /value') do (
@REM set "CPU_PROCESSOR_NUM=%%I"
@REM )
@REM set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:ParallelGCThreads=%CPU_PROCESSOR_NUM%
@REM if there are much of stw time of reference process in GC log, you can turn on option below.
@REM NOTE: it may have an impact on application's throughput.
@REM set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+ParallelRefProcEnabled
@REM this option can reduce the overhead caused by memory allocation, page fault interrupts, etc. during JVM operation.
@REM NOTE: it may reduce memory utilization and trigger OOM killer when memory is tight.
@REM set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+AlwaysPreTouch
@REM if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance
@REM set IOTDB_JMX_OPTS=%IOTDB_HEAP_OPTS% -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=\tmp\datanode_heapdump.hprof
@REM You can put your env variable here
@REM set JAVA_HOME=%JAVA_HOME%
@REM set gc log.
IF "%1" equ "printgc" (
    IF "%JAVA_VERSION%" == "8" (
        md "%IOTDB_HOME%\logs"
        set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -Xloggc:"%IOTDB_HOME%\logs\gc.log" -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M
        @REM For more detailed GC information, you can uncomment option below.
        @REM NOTE: more detailed GC information may bring larger GC log files.
        @REM set IOTDB_JMX_OPTS=%IOTDB_JMX_OPTS% -Xloggc:"%IOTDB_HOME%\logs\gc.log" -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:+PrintTenuringDistribution -XX:+PrintHeapAtGC -XX:+PrintReferenceGC -XX:+PrintSafepointStatistics -XX:PrintSafepointStatisticsCount=1 -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M
    ) ELSE (
        md "%IOTDB_HOME%\logs"
        set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -Xlog:gc=info,heap*=trace,age*=debug,safepoint=info,promotion*=trace:file="%IOTDB_HOME%\logs\gc.log":time,uptime,pid,tid,level:filecount=10,filesize=10485760
        @REM For more detailed GC information, you can uncomment option below.
        @REM NOTE: more detailed GC information may bring larger GC log files.
        @REM set IOTDB_JMX_OPTS=%IOTDB_JMX_OPTS% -Xlog:gc*=debug,heap*=debug,age*=trace,metaspace*=info,safepoint*=debug,promotion*=info:file="%IOTDB_HOME%\logs\gc.log":time,uptime,pid,tid,level,tags:filecount=10,filesize=100M
    )
)
@REM Add args for Java 11 and above, due to [JEP 396: Strongly Encapsulate JDK Internals by Default] (https://openjdk.java.net/jeps/396)
IF "%JAVA_VERSION%" == "8" (
    set ILLEGAL_ACCESS_PARAMS=
) ELSE (
    set ILLEGAL_ACCESS_PARAMS=--add-opens=java.base/java.util.concurrent=ALL-UNNAMED^
     --add-opens=java.base/java.lang=ALL-UNNAMED^
     --add-opens=java.base/java.util=ALL-UNNAMED^
     --add-opens=java.base/java.nio=ALL-UNNAMED^
     --add-opens=java.base/java.io=ALL-UNNAMED^
     --add-opens=java.base/java.net=ALL-UNNAMED
)
echo DataNode on heap memory size = %ON_HEAP_MEMORY%B, off heap memory size = %OFF_HEAP_MEMORY%B
echo If you want to change this configuration, please check conf\datanode-env.bat.

View File

@@ -0,0 +1,351 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# You can set DataNode memory size, example '2G' or '2048M'
# Leave empty to auto-size to half of the machine's RAM (see calculate_memory_sizes).
MEMORY_SIZE=
# You can put your env variable here
# export JAVA_HOME=$JAVA_HOME
# Set max number of open files
# NOTE(review): raising the soft limit here only affects this shell and its
# children; a permanent limit must be configured system-wide.
max_num=$(ulimit -n)
if [ $max_num -le 65535 ]; then
ulimit -n 65535
if [ $? -ne 0 ]; then
echo "Warning: Failed to set max number of files to be 65535, maybe you need to use 'sudo ulimit -n 65535' to set it when you use iotdb in production environments."
fi
fi
# Set somaxconn to a better value to avoid meaningless connection reset issues when the system is under high load.
# The original somaxconn will be set back when the system reboots.
# For more detail, see: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=19f92a030ca6d772ab44b22ee6a01378a8cb32d4
# This block only warns; it never changes the kernel setting itself (that
# requires root via sysctl, as shown in the printed command).
SOMAXCONN=65535
case "$(uname)" in
Linux)
somaxconn=$(sysctl -n net.core.somaxconn)
if [ "$somaxconn" -lt $SOMAXCONN ]; then
echo "WARN:"
echo "WARN: the value of net.core.somaxconn (=$somaxconn) is too small, please set it to a larger value using the following command."
echo "WARN: sudo sysctl -w net.core.somaxconn=$SOMAXCONN"
echo "WARN: The original net.core.somaxconn value will be set back when the os reboots."
echo "WARN:"
fi
;;
FreeBSD | Darwin)
somaxconn=$(sysctl -n kern.ipc.somaxconn)
if [ "$somaxconn" -lt $SOMAXCONN ]; then
echo "WARN:"
echo "WARN: the value of kern.ipc.somaxconn (=$somaxconn) is too small, please set it to a larger value using the following command."
echo "WARN: sudo sysctl -w kern.ipc.somaxconn=$SOMAXCONN"
echo "WARN: The original kern.ipc.somaxconn value will be set back when the os reboots."
echo "WARN:"
fi
;;
esac
# whether we allow enabling heap dump files (consumed by the commented
# HeapDumpOnOutOfMemoryError option near the end of this script)
IOTDB_ALLOW_HEAP_DUMP="true"
# Detect total system memory/cores and derive the JVM memory split.
# Inputs:  MEMORY_SIZE (optional, e.g. '2G' or '2048M'; empty = auto).
# Outputs (globals): ON_HEAP_MEMORY and OFF_HEAP_MEMORY, both as "<n>M" strings.
# When MEMORY_SIZE is empty, half of physical RAM is used as the total budget,
# then split between heap and direct memory by the tiered rules commented below.
calculate_memory_sizes()
{
# Per-OS probes for total RAM (MB) and CPU core count; unknown platforms
# fall back to conservative defaults (2 GB / 2 cores).
case "`uname`" in
Linux)
system_memory_in_mb=`free -m| sed -n '2p' | awk '{print $2}'`
system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo`
;;
FreeBSD)
system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'`
system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
;;
SunOS)
system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'`
system_cpu_cores=`psrinfo | wc -l`
;;
Darwin)
system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'`
system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
;;
*)
# assume reasonable defaults for e.g. a modern desktop or
# cheap server
system_memory_in_mb="2048"
system_cpu_cores="2"
;;
esac
# some systems like the raspberry pi don't report cores, use at least 1
if [ "$system_cpu_cores" -lt "1" ]
then
system_cpu_cores="1"
fi
# suggest using memory, system memory 1 / 2
suggest_using_memory_in_mb=`expr $system_memory_in_mb / 2`
if [ -n "$MEMORY_SIZE" ]
then
# Validate the unit suffix: "${MEMORY_SIZE%"G"}" differs from MEMORY_SIZE
# only when the value ends in G (same trick for M); anything else is rejected.
if [ "${MEMORY_SIZE%"G"}" != "$MEMORY_SIZE" ] || [ "${MEMORY_SIZE%"M"}" != "$MEMORY_SIZE" ]
then
if [ "${MEMORY_SIZE%"G"}" != "$MEMORY_SIZE" ]
then
memory_size_in_mb=`expr ${MEMORY_SIZE%"G"} "*" 1024`
else
memory_size_in_mb=`expr ${MEMORY_SIZE%"M"}`
fi
else
echo "Invalid format of MEMORY_SIZE, please use the format like 2048M or 2G"
exit 1
fi
else
memory_size_in_mb=$suggest_using_memory_in_mb
fi
# set on heap memory size
# when memory_size_in_mb is less than 4 * 1024, we will set on heap memory size to memory_size_in_mb / 4 * 3
# when memory_size_in_mb is greater than 4 * 1024 and less than 16 * 1024, we will set on heap memory size to memory_size_in_mb / 5 * 4
# when memory_size_in_mb is greater than 16 * 1024 and less than 128 * 1024, we will set on heap memory size to memory_size_in_mb / 8 * 7
# when memory_size_in_mb is greater than 128 * 1024, we will set on heap memory size to memory_size_in_mb - 16 * 1024
if [ "$memory_size_in_mb" -lt "4096" ]
then
on_heap_memory_size_in_mb=`expr $memory_size_in_mb / 4 \* 3`
elif [ "$memory_size_in_mb" -lt "16384" ]
then
on_heap_memory_size_in_mb=`expr $memory_size_in_mb / 5 \* 4`
elif [ "$memory_size_in_mb" -lt "131072" ]
then
on_heap_memory_size_in_mb=`expr $memory_size_in_mb / 8 \* 7`
else
on_heap_memory_size_in_mb=`expr $memory_size_in_mb - 16384`
fi
# Whatever is not given to the heap becomes the direct (off-heap) budget.
off_heap_memory_size_in_mb=`expr $memory_size_in_mb - $on_heap_memory_size_in_mb`
ON_HEAP_MEMORY="${on_heap_memory_size_in_mb}M"
OFF_HEAP_MEMORY="${off_heap_memory_size_in_mb}M"
}
# The conf directory is wherever this script lives (it is sourced from conf/).
DATANODE_CONF_DIR="`dirname "$0"`"
# find first dir of dn_data_dirs from properties file
# Prints the first directory configured in dn_data_dirs of the given
# properties file (looked up under DATANODE_CONF_DIR).
# dn_data_dirs may hold several directories separated by ';' and ',';
# the first entry wins. Relative paths are resolved against the IoTDB
# home directory (one level above the conf dir). Prints an empty string
# when the property is absent or has no value.
get_first_data_dir() {
    local config_file="$1"
    local data_dir_value=""
    data_dir_value=`sed '/^dn_data_dirs=/!d;s/.*=//' ${DATANODE_CONF_DIR}/${config_file} | tail -n 1`
    if [ -z "$data_dir_value" ]; then
        echo ""
        return 0
    fi
    # BUGFIX: start from the full value so a single-directory setting
    # (no ';' separator) is returned as-is; previously first_dir stayed
    # empty in that case and the function emitted "$DATANODE_CONF_DIR/../".
    local first_dir="$data_dir_value"
    if [[ "$first_dir" == *";"* ]]; then
        first_dir=$(echo "$first_dir" | cut -d';' -f1)
    fi
    if [[ "$first_dir" == *","* ]]; then
        first_dir=$(echo "$first_dir" | cut -d',' -f1)
    fi
    if [[ "$first_dir" == /* ]]; then
        echo "$first_dir"
    else
        echo "$DATANODE_CONF_DIR/../$first_dir"
    fi
}
# Pick the heap-dump target directory: prefer the first dn_data_dirs entry
# from iotdb-system.properties, falling back to the legacy
# iotdb-datanode.properties name, then to the default data directory.
if [ -f "${DATANODE_CONF_DIR}/iotdb-system.properties" ]; then
heap_dump_dir=$(get_first_data_dir "iotdb-system.properties")
else
heap_dump_dir=$(get_first_data_dir "iotdb-datanode.properties")
fi
if [ -z "$heap_dump_dir" ]; then
heap_dump_dir="$(dirname "$0")/../data/datanode/data"
fi
# Create it eagerly so a later -XX:HeapDumpPath (commented out below) can write there.
if [ ! -d "$heap_dump_dir" ]; then
mkdir -p "$heap_dump_dir"
fi
# find java in JAVA_HOME: prefer $JAVA_HOME/bin (amd64 variant first),
# fall back to whatever "java" resolves to on PATH.
if [ -n "$JAVA_HOME" ]; then
    for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
        if [ -x "$java" ]; then
            JAVA="$java"
            break
        fi
    done
else
    JAVA=java
fi
# Quote $JAVA so an empty value is detected reliably and a JAVA_HOME path
# containing spaces does not break the test (was: [ -z $JAVA ]).
if [ -z "$JAVA" ] ; then
    echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr
    exit 1;
fi
# Determine the sort of JVM we'll be running on.
# For Java 8 the version string is "1.8.0_NNN": JVM_VERSION becomes "1.8.0"
# and JVM_PATCH_VERSION the update number NNN. For Java 9+ there is no
# underscore, so both expansions yield the full version (e.g. "11.0.2").
java_ver_output=`"$JAVA" -version 2>&1`
jvmver=`echo "$java_ver_output" | grep '[openjdk|java] version' | awk -F'"' 'NR==1 {print $2}' | cut -d\- -f1`
JVM_VERSION=${jvmver%_*}
JVM_PATCH_VERSION=${jvmver#*_}
if [ "$JVM_VERSION" \< "1.8" ] ; then
    echo "IoTDB requires Java 8u92 or later."
    exit 1;
fi
# BUGFIX: the update-number check must target Java 8 itself. The original
# repeated the "< 1.8" comparison, which is unreachable after the exit
# above, so pre-u92 Java 8 was never rejected despite the message.
if [[ "$JVM_VERSION" == 1.8* ]] && [ "$JVM_PATCH_VERSION" -lt 92 ] ; then
    echo "IoTDB requires Java 8u92 or later."
    exit 1;
fi
# Split "a.b.c" into words so the major version can be read positionally:
# index 0 is "1" for Java 8 ("1.8.x"), otherwise the real major version.
version_arr=(${JVM_VERSION//./ })
illegal_access_params=""
#GC log path has to be defined here because it needs to access IOTDB_HOME
# GC logging is only appended when (a) the caller has not already supplied a
# GC-log flag in IOTDB_JMX_OPTS and (b) the script was invoked with "printgc".
if [ "${version_arr[0]}" = "1" ] ; then
# Java 8
MAJOR_VERSION=${version_arr[1]}
echo "$IOTDB_JMX_OPTS" | grep -q "^-[X]loggc"
if [ "$?" = "1" ] ; then # [X] to prevent ccm from replacing this line
# only add -Xlog:gc if it's not mentioned in jvm-server.options file
mkdir -p ${IOTDB_HOME}/logs
if [ "$#" -ge "1" -a "$1" == "printgc" ]; then
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xloggc:${IOTDB_HOME}/logs/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M"
# For more detailed GC information, you can uncomment option below.
# NOTE: more detailed GC information may bring larger GC log files.
# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xloggc:${IOTDB_HOME}/logs/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:+PrintTenuringDistribution -XX:+PrintHeapAtGC -XX:+PrintReferenceGC -XX:+PrintSafepointStatistics -XX:PrintSafepointStatisticsCount=1 -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
fi
fi
else
#JDK 11 and others
MAJOR_VERSION=${version_arr[0]}
# See description of https://bugs.openjdk.java.net/browse/JDK-8046148 for details about the syntax
# The following is the equivalent to -XX:+PrintGCDetails -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M
echo "$IOTDB_JMX_OPTS" | grep -q "^-[X]log:gc"
if [ "$?" = "1" ] ; then # [X] to prevent ccm from replacing this line
# only add -Xlog:gc if it's not mentioned in jvm-server.options file
mkdir -p ${IOTDB_HOME}/logs
if [ "$#" -ge "1" -a "$1" == "printgc" ]; then
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xlog:gc=info,heap*=info,age*=info,safepoint=info,promotion*=info:file=${IOTDB_HOME}/logs/gc.log:time,uptime,pid,tid,level:filecount=10,filesize=10485760"
# For more detailed GC information, you can uncomment option below.
# NOTE: more detailed GC information may bring larger GC log files.
# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xlog:gc*=debug,heap*=debug,age*=trace,metaspace*=info,safepoint*=debug,promotion*=info:file=${IOTDB_HOME}/logs/gc.log:time,uptime,pid,tid,level,tags:filecount=10,filesize=100M"
fi
fi
# Add argLine for Java 11 and above, due to [JEP 396: Strongly Encapsulate JDK Internals by Default] (https://openjdk.java.net/jeps/396)
illegal_access_params="$illegal_access_params --add-opens=java.base/java.util.concurrent=ALL-UNNAMED"
illegal_access_params="$illegal_access_params --add-opens=java.base/java.lang=ALL-UNNAMED"
illegal_access_params="$illegal_access_params --add-opens=java.base/java.util=ALL-UNNAMED"
illegal_access_params="$illegal_access_params --add-opens=java.base/java.nio=ALL-UNNAMED"
illegal_access_params="$illegal_access_params --add-opens=java.base/java.io=ALL-UNNAMED"
illegal_access_params="$illegal_access_params --add-opens=java.base/java.net=ALL-UNNAMED"
fi
# Populate ON_HEAP_MEMORY / OFF_HEAP_MEMORY (see calculate_memory_sizes above),
# unless overridden manually via the commented assignments below.
calculate_memory_sizes
# on heap memory size
#ON_HEAP_MEMORY="2G"
# off heap memory size
#OFF_HEAP_MEMORY="512M"
# Re-parse OFF_HEAP_MEMORY back to MB so a manual "2G"/"512M" override above
# is also handled when computing the per-thread buffer cap.
if [ "${OFF_HEAP_MEMORY%"G"}" != "$OFF_HEAP_MEMORY" ]
then
off_heap_memory_size_in_mb=`expr ${OFF_HEAP_MEMORY%"G"} "*" 1024`
else
off_heap_memory_size_in_mb=`expr ${OFF_HEAP_MEMORY%"M"}`
fi
# threads number for io
IO_THREADS_NUMBER="1000"
# Max cached buffer size, Note: unit can only be B!
# which equals OFF_HEAP_MEMORY / IO_THREADS_NUMBER
MAX_CACHED_BUFFER_SIZE=`expr $off_heap_memory_size_in_mb \* 1024 \* 1024 / $IO_THREADS_NUMBER`
#true or false
#DO NOT FORGET TO MODIFY THE PASSWORD FOR SECURITY (${IOTDB_CONF}/jmx.password and ${IOTDB_CONF}/jmx.access)
#If you want to connect JMX Service by network in local machine, such as nodeTool.sh will try to connect 127.0.0.1:31999, please set JMX_LOCAL to false.
JMX_LOCAL="true"
JMX_PORT="31999"
#only take effect when the jmx_local=false
#You need to change this IP as a public IP if you want to remotely connect IoTDB by JMX.
# 0.0.0.0 is not allowed
JMX_IP="127.0.0.1"
# Remote JMX: password-authenticated, SSL disabled; the jmx.password/jmx.access
# files must be 600 or the JVM refuses to start the agent.
if [ ${JMX_LOCAL} = "false" ]; then
echo "setting remote JMX..."
#you may have no permission to run chmod. If so, contact your system administrator.
chmod 600 ${IOTDB_CONF}/jmx.password
chmod 600 ${IOTDB_CONF}/jmx.access
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Djava.rmi.server.randomIDs=true"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.ssl=false"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.authenticate=true"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.password.file=${IOTDB_CONF}/jmx.password"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.access.file=${IOTDB_CONF}/jmx.access"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Djava.rmi.server.hostname=$JMX_IP"
else
echo "setting local JMX..."
fi
# Final JVM options: fixed heap (Xms == Xmx), direct-memory cap, and the
# per-thread NIO buffer-cache limit computed above.
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Diotdb.jmx.local=$JMX_LOCAL"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xms${ON_HEAP_MEMORY}"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xmx${ON_HEAP_MEMORY}"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:MaxDirectMemorySize=${OFF_HEAP_MEMORY}"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Djdk.nio.maxCachedBufferSize=${MAX_CACHED_BUFFER_SIZE}"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+CrashOnOutOfMemoryError"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+UseAdaptiveSizePolicy"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xss512k"
# these two options print safepoints with pauses longer than 1000ms to the standard output. You can see these logs via redirection when starting in the background like "start-datanode.sh > log_datanode_safepoint.log"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:SafepointTimeoutDelay=1000"
IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+SafepointTimeout"
# option below tries to optimize safepoint stw time for large counted loop.
# NOTE: it may have an impact on JIT's black-box optimization.
# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+UseCountedLoopSafepoints"
# when the GC time is too long, if there are remaining CPU resources, you can try to turn on and increase options below.
# for Linux:
# CPU_PROCESSOR_NUM=$(nproc)
# for MacOS:
# CPU_PROCESSOR_NUM=$(sysctl -n hw.ncpu)
# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:ParallelGCThreads=${CPU_PROCESSOR_NUM}"
# if there are much of stw time of reference process in GC log, you can turn on option below.
# NOTE: it may have an impact on application's throughput.
# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+ParallelRefProcEnabled"
# this option can reduce the overhead caused by memory allocation, page fault interrupts, etc. during JVM operation.
# NOTE: it may reduce memory utilization and trigger OOM killer when memory is tight.
# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+AlwaysPreTouch"
# if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance
# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${heap_dump_dir}/datanode_heapdump.hprof"
echo "DataNode on heap memory size = ${ON_HEAP_MEMORY}B, off heap memory size = ${OFF_HEAP_MEMORY}B"
echo "If you want to change this configuration, please check conf/datanode-env.sh."

View File

@@ -0,0 +1,33 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This configuration file only needs to be filled in when the start-all.sh, stop-all.sh, and destroy.sh scripts are used.
# You also need to modify this configuration file when the cluster nodes change
# Configure the ConfigNode machine addresses, separated by commas (,)
confignode_address_list=
# Configure the DataNode machine addresses, separated by commas (,)
datanode_address_list=
# User name for logging in to the deployment machine using ssh
ssh_account=root
# ssh login port
ssh_port=22
# iotdb deployment directory (iotdb should be deployed to the following folders in all machines)
confignode_deploy_path=
datanode_deploy_path=

View File

@@ -0,0 +1,72 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
####################
### Cluster Configuration
####################
cluster_name=defaultCluster
####################
### Seed ConfigNode
####################
cn_seed_config_node=127.0.0.1:10710
dn_seed_config_node=127.0.0.1:10710
####################
### Node RPC Configuration
####################
cn_internal_address=127.0.0.1
cn_internal_port=10710
cn_consensus_port=10720
dn_rpc_address=0.0.0.0
dn_rpc_port=6667
dn_internal_address=127.0.0.1
dn_internal_port=10730
dn_mpp_data_exchange_port=10740
dn_schema_region_consensus_port=10750
dn_data_region_consensus_port=10760
####################
### Replication Configuration
####################
schema_replication_factor=1
data_replication_factor=1
####################
### Directory Configuration
####################
# dn_data_dirs=data/datanode/data
# dn_wal_dirs=data/datanode/wal
####################
### Metric Configuration
####################
# cn_metric_reporter_list=
cn_metric_prometheus_reporter_port=9091
# dn_metric_reporter_list=
dn_metric_prometheus_reporter_port=9092

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,22 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# see https://docs.oracle.com/javase/8/docs/technotes/guides/management/agent.html#gdeup
iotdb readonly
root readwrite

View File

@@ -0,0 +1,22 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# see https://docs.oracle.com/javase/8/docs/technotes/guides/management/agent.html#gdeup
iotdb passw!d
root passw!d

View File

@@ -0,0 +1,49 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<configuration scan="true" scanPeriod="60 seconds">
<appender class="ch.qos.logback.core.ConsoleAppender" name="stdout">
<Target>System.out</Target>
<encoder>
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="backup">
<Encoding>UTF-8</Encoding>
<file>${IOTDB_HOME}/logs/log_backup.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-backup-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<root level="all">
<appender-ref ref="stdout"/>
<appender-ref ref="backup"/>
</root>
</configuration>

View File

@@ -0,0 +1,112 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<configuration scan="true" scanPeriod="60 seconds">
<jmxConfigurator/>
<!-- prevent logback from outputting its own status at the start of every log -->
<statusListener class="ch.qos.logback.core.status.NopStatusListener"/>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILEERROR">
<file>${CONFIGNODE_HOME}/logs/log_confignode_error.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${CONFIGNODE_HOME}/logs/log-confignode-error-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>error</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILEWARN">
<file>${CONFIGNODE_HOME}/logs/log_confignode_warn.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${CONFIGNODE_HOME}/logs/log-confignode-warn-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>WARN</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILEDEBUG">
<file>${CONFIGNODE_HOME}/logs/log_confignode_debug.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${CONFIGNODE_HOME}/logs/log-confignode-debug-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>DEBUG</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender class="ch.qos.logback.core.ConsoleAppender" name="stdout">
<Target>System.out</Target>
<encoder>
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>${CONSOLE_LOG_LEVEL:-DEBUG}</level>
</filter>
</appender>
<!-- an appender that collects all log records at level INFO and above -->
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILEALL">
<file>${CONFIGNODE_HOME}/logs/log_confignode_all.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${CONFIGNODE_HOME}/logs/log-confignode-all-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<root level="info">
<appender-ref ref="FILEDEBUG"/>
<appender-ref ref="FILEWARN"/>
<appender-ref ref="FILEERROR"/>
<appender-ref ref="FILEALL"/>
<appender-ref ref="stdout"/>
</root>
<logger level="info" name="org.apache.iotdb.confignode"/>
<logger level="info" name="org.apache.ratis"/>
</configuration>

View File

@@ -0,0 +1,267 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<configuration scan="true" scanPeriod="60 seconds">
<jmxConfigurator/>
<!-- prevent logback from outputting its own status at the start of every log -->
<statusListener class="ch.qos.logback.core.status.NopStatusListener"/>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILEERROR">
<file>${IOTDB_HOME}/logs/log_datanode_error.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-error-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>error</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILEWARN">
<file>${IOTDB_HOME}/logs/log_datanode_warn.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-warn-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>WARN</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILEDEBUG">
<file>${IOTDB_HOME}/logs/log_datanode_debug.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-debug-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>DEBUG</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILETRACE">
<file>${IOTDB_HOME}/logs/log_datanode_trace.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-trace-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>TRACE</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender class="ch.qos.logback.core.ConsoleAppender" name="stdout">
<Target>System.out</Target>
<encoder>
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>${CONSOLE_LOG_LEVEL:-DEBUG}</level>
</filter>
</appender>
<!-- an appender that collects all log records at level INFO and above -->
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILEALL">
<file>${IOTDB_HOME}/logs/log_datanode_all.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-all-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="FILE_COST_MEASURE">
<file>${IOTDB_HOME}/logs/log_datanode_measure.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-measure-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="AUDIT">
<file>${IOTDB_HOME}/logs/log_datanode_audit.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-audit-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="QUERY_DEBUG">
<file>${IOTDB_HOME}/logs/log_datanode_query_debug.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-query-debug-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="SLOW_SQL">
<file>${IOTDB_HOME}/logs/log_datanode_slow_sql.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-slow-sql-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="COMPACTION">
<file>${IOTDB_HOME}/logs/log_datanode_compaction.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-compaction-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="PIPE">
<file>${IOTDB_HOME}/logs/log_datanode_pipe.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-pipe-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EXPLAIN_ANALYZE">
<file>${IOTDB_HOME}/logs/log_explain_analyze.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${IOTDB_HOME}/logs/log-datanode-explain-%d{yyyyMMdd}.log.gz</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<append>true</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<root level="info">
<appender-ref ref="FILETRACE"/>
<appender-ref ref="FILEDEBUG"/>
<appender-ref ref="FILEWARN"/>
<appender-ref ref="FILEERROR"/>
<appender-ref ref="FILEALL"/>
<appender-ref ref="stdout"/>
</root>
<logger level="OFF" name="io.moquette.broker.metrics.MQTTMessageLogger"/>
<logger level="info" name="org.apache.iotdb.db.service"/>
<logger level="info" name="org.apache.iotdb.db.conf"/>
<logger level="info" name="org.apache.iotdb.db.cost.statistic">
<appender-ref ref="FILE_COST_MEASURE"/>
</logger>
<logger level="info" name="IoTDB_AUDIT_LOGGER">
<appender-ref ref="AUDIT"/>
</logger>
<logger level="info" name="QUERY_DEBUG">
<appender-ref ref="QUERY_DEBUG"/>
</logger>
<logger level="info" name="SLOW_SQL">
<appender-ref ref="SLOW_SQL"/>
</logger>
<logger level="info" name="QUERY_FREQUENCY">
<appender-ref ref="QUERY_FREQUENCY"/>
</logger>
<logger level="info" name="DETAILED_FAILURE_QUERY_TRACE"/>
<logger level="info" name="COMPACTION">
<appender-ref ref="COMPACTION"/>
</logger>
<logger level="info" name="org.apache.iotdb.pipe.api">
<appender-ref ref="PIPE"/>
</logger>
<logger level="info" name="org.apache.iotdb.db.pipe">
<appender-ref ref="PIPE"/>
</logger>
<logger level="info" name="org.apache.iotdb.commons.pipe">
<appender-ref ref="PIPE"/>
</logger>
<logger level="info" name="EXPLAIN_ANALYZE">
<appender-ref ref="EXPLAIN_ANALYZE"/>
</logger>
</configuration>

View File

@@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<configuration scan="true" scanPeriod="60 seconds">
<appender class="ch.qos.logback.core.ConsoleAppender" name="stdout">
<Target>System.out</Target>
<encoder>
<pattern>%d [%t] %-5p %C{25}:%L - %m %n</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
</appender>
<root level="error">
<appender-ref ref="stdout"/>
</root>
</configuration>

View File

@@ -0,0 +1,67 @@
# This is an automatically generated configuration file for Explorer in [TOML](https://toml.io/) format.
#
# Here is a full list of available options.
# Explorer server port to listen on.
# Default is 6060.
#
port = 6060
# IPv4 listen address.
# Default is 0.0.0.0
addr = "0.0.0.0"
# IPv6 listen address.
# ipv6 = "::1"
# Explorer server log level.
# Default is "info"
#
log_level = "info"
# REST API endpoint to connect to the cluster.
# This configuration is also the target for data migration tasks.
#
# Default is "http://buildkitsandbox:6041" - the default endpoint for REST API.
#
cluster = "http://fastbee:6041"
# native endpoint to connect to the cluster.
# Default is disabled. To enable it, set it to the native API URL like "taos://buildkitsandbox:6030" and uncomment it.
# If you enable it, you will get more performance for data migration tasks.
#
# cluster_native = "taos://buildkitsandbox:6030"
# API endpoint for data replication/backup/data sources. No default option.
# Set it to API URL like "http://buildkitsandbox:6050".
#
x_api ="http://fastbee:6050"
# GRPC endpoint for "Agent"s.
# Default is "http://buildkitsandbox:6055" - the default endpoint for taosX grpc API.
# You should set it to public IP or FQDN name like:
# "http://192.168.111.111:6055" or "http://node1.company.domain:6055" and
# ensure to add the port to the exception list of the firewall if it enabled.
grpc = "http://fastbee:6055"
# CORS configuration switch, it allows cross-origin access
cors = true
# cloud open api.
# cloud_open_api = "https://pre.ali.cloud.taosdata.com/openapi"
# Enable ssl
# If the following two files exist, enable ssl protocol
#
[ssl]
# SSL certificate
#
# certificate = "/path/to/ca.file" # on linux/macOS
# certificate = "C:\\path\\to\\ca.file" # on windows
# SSL certificate key
#
# certificate_key = "/path/to/key.file" # on linux/macOS
# certificate_key = "C:\\path\\to\\key.file" # on windows

View File

@@ -0,0 +1,193 @@
########################################################
# #
# Configuration #
# #
########################################################
######### 0. Client only configurations #############
# The interval for CLI to send heartbeat to mnode
# shellActivityTimer 3
############### 1. Cluster End point ############################
# The end point of the first dnode in the cluster to be connected to when this dnode or the CLI utility is started
# firstEp hostname:6030
# The end point of the second dnode to be connected to if the firstEp is not available
# secondEp
############### 2. Configuration Parameters of current dnode #####
# The FQDN of the host on which this dnode will be started. It can be IP address
fqdn fastbee
# The port for external access after this dnode is started
# serverPort 6030
# The maximum number of connections a dnode can accept
# maxShellConns 5000
# The directory for writing log files, if you are using Windows platform please change to Windows path
# logDir /var/log/taos
# All data files are stored in this directory, if you are using Windows platform please change to Windows path
# dataDir /var/lib/taos
# temporary file's directory, if you are using Windows platform please change to Windows path
# tempDir /tmp/
# Switch for allowing to collect and report service usage information
# telemetryReporting 1
# Switch for allowing to collect and report crash information
# crashReporting 1
# The maximum number of vnodes supported by this dnode
# supportVnodes 0
# The interval of this dnode reporting status to mnode, [1..10] seconds
# statusInterval 1
# The minimum sliding window time, milli-second
# minSlidingTime 10
# The minimum time window, milli-second
# minIntervalTime 10
# The maximum allowed query buffer size in MB during query processing for each data node
# -1 no limit (default)
# 0 no query allowed, queries are disabled
# queryBufferSize -1
# The compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
# compressMsgSize -1
# query retrieved column data compression option:
# -1 (no compression)
# 0 (all retrieved column data compressed),
# > 0 (any retrieved column size greater than this value all data will be compressed.)
# compressColData -1
# system time zone
# timezone UTC-8
# system time zone (for windows 10)
# timezone Asia/Shanghai (CST, +0800)
# system locale
# locale en_US.UTF-8
# system charset
# charset UTF-8
# stop writing logs when the disk size of the log folder is less than this value
# minimalLogDirGB 1.0
# stop writing temporary files when the disk size of the tmp folder is less than this value
# minimalTmpDirGB 1.0
# if free disk space is less than this value, this dnode will fail to start
# minimalDataDirGB 2.0
# enable/disable system monitor
# monitor 1
# enable/disable audit log
# audit 1
# enable/disable audit create table
# auditCreateTable 1
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log file
# numOfLogLines 10000000
# write log in async way: 1 - async, 0 - sync
# asyncLog 1
# time period of keeping log files, in days
# logKeepDays 0
############ 3. Debug Flag and levels #############################################
# The following parameters are used for debug purpose only by this dnode.
# debugFlag is a 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# Available debug levels are:
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
# debug flag for all log type, take effect when non-zero value
# debugFlag 0
# debug flag for timer
# tmrDebugFlag 131
# debug flag for util
# uDebugFlag 131
# debug flag for rpc
# rpcDebugFlag 131
# debug flag for jni
# jniDebugFlag 131
# debug flag for query
# qDebugFlag 131
# debug flag for client driver
# cDebugFlag 131
# debug flag for dnode messages
# dDebugFlag 135
# debug flag for vnode
# vDebugFlag 131
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for wal
# wDebugFlag 135
# debug flag for sync module
# sDebugFlag 135
# debug flag for tsdb
# tsdbDebugFlag 131
# debug flag for tq
# tqDebugFlag 131
# debug flag for fs
# fsDebugFlag 131
# debug flag for udf
# udfDebugFlag 131
# debug flag for sma
# smaDebugFlag 131
# debug flag for index
# idxDebugFlag 131
# debug flag for tdb
# tdbDebugFlag 131
# debug flag for meta
# metaDebugFlag 131
# generate core file when service crash
# enableCoreFile 1
monitor 1
monitorFQDN fastbee
audit 1

View File

@@ -0,0 +1,110 @@
debug = true
taosConfigDir = ""
port = 6041
logLevel = "info"
httpCodeServerError = false
SMLAutoCreateDB = false
[cors]
allowAllOrigins = true
#[pool]
#maxConnect = 0
#maxIdle = 0
#idleTimeout = 0
[ssl]
enable = false
certFile = ""
keyFile = ""
[log]
#path = "/var/log/taos"
rotationCount = 30
rotationTime = "24h"
rotationSize = "1GB"
enableRecordHttpSql = false
sqlRotationCount = 2
sqlRotationTime = "24h"
sqlRotationSize = "1GB"
[monitor]
disable = true
collectDuration = "3s"
incgroup = false
pauseQueryMemoryThreshold = 70
pauseAllMemoryThreshold = 80
identity = ""
[uploadKeeper]
enable = true
url = "http://127.0.0.1:6043/adapter_report"
interval = "15s"
timeout = "5s"
retryTimes = 3
retryInterval = "5s"
[opentsdb]
enable = true
[influxdb]
enable = true
[statsd]
enable = false
port = 6044
db = "statsd"
user = "root"
password = "taosdata"
worker = 10
gatherInterval = "5s"
protocol = "udp4"
maxTCPConnections = 250
tcpKeepAlive = false
allowPendingMessages = 50000
deleteCounters = true
deleteGauges = true
deleteSets = true
deleteTimings = true
[collectd]
enable = false
port = 6045
db = "collectd"
user = "root"
password = "taosdata"
worker = 10
[opentsdb_telnet]
enable = false
maxTCPConnections = 250
tcpKeepAlive = false
dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
ports = [6046, 6047, 6048, 6049]
user = "root"
password = "taosdata"
batchSize = 1
flushInterval = "0s"
[node_exporter]
enable = false
db = "node_exporter"
user = "root"
password = "taosdata"
urls = ["http://fastbee:9100"]
responseTimeout = "5s"
httpUsername = ""
httpPassword = ""
httpBearerTokenString = ""
caCertFile = ""
certFile = ""
keyFile = ""
insecureSkipVerify = true
gatherDuration = "5s"
[prometheus]
enable = true
[tmq]
releaseIntervalMultiplierForAutocommit = 2

View File

@@ -0,0 +1,48 @@
# Start with debug middleware for gin
debug = false
# Listen port, default is 6043
port = 6043
# log level
loglevel = "info"
# go pool size
gopoolsize = 50000
# interval for metrics
RotationInterval = "15s"
[tdengine]
host = "fastbee"
port = 6041
username = "root"
password = "taosdata"
usessl = false
[metrics]
# metrics prefix in metrics names.
prefix = "taos"
# export some tables that are not super table
tables = []
# database for storing metrics data
[metrics.database]
name = "log"
# database options for db storing metrics data
[metrics.database.options]
vgroups = 1
buffer = 64
KEEP = 90
cachemodel = "both"
[environment]
# Whether running in cgroup.
incgroup = false
[log]
#path = "/var/log/taos"
rotationCount = 5
rotationTime = "24h"
rotationSize = 100000000

Binary file not shown.

Binary file not shown.

View File

View File

@@ -30,6 +30,14 @@ spring:
merge-sql: true
wall:
none-base-statement-allow: true
taos: # 配置 taos 数据源
enabled: false
type: com.alibaba.druid.pool.DruidDataSource
driver-class-name: com.taosdata.jdbc.rs.RestfulDriver
url: jdbc:TAOS-RS://fastbee:6041/fastbee_log?timezone=UTC-8&charset=utf-8
username: root
password: taosdata
dbName: fastbee_log
# slave:
# type: com.alibaba.druid.pool.DruidDataSource
# driver-class-name: com.mysql.cj.jdbc.Driver
@@ -43,7 +51,7 @@ spring:
redis:
host: localhost # 地址
port: 6379 # 端口默认为6379
database: 15 # 数据库索引
database: 0 # 数据库索引
password: fastbee # 密码
timeout: 10s # 连接超时时间
lettuce:

View File

@@ -30,6 +30,14 @@ spring:
merge-sql: true
wall:
none-base-statement-allow: true
taos: # 配置 taos 数据源
enabled: false
type: com.alibaba.druid.pool.DruidDataSource
driver-class-name: com.taosdata.jdbc.rs.RestfulDriver
url: jdbc:TAOS-RS://fastbee:6041/fastbee_log?timezone=UTC-8&charset=utf-8
username: root
password: taosdata
dbName: fastbee_log
# slave:
# type: com.alibaba.druid.pool.DruidDataSource
# driver-class-name: com.mysql.cj.jdbc.Driver

View File

@@ -30,6 +30,34 @@ spring:
merge-sql: true
wall:
none-base-statement-allow: true
taos: # 配置 taos 数据源
enabled: false
type: com.alibaba.druid.pool.DruidDataSource
driver-class-name: com.taosdata.jdbc.rs.RestfulDriver
url: jdbc:TAOS-RS://fastbee:6041/fastbee_log?timezone=UTC-8&charset=utf-8
# driver-class-name: com.taosdata.jdbc.TSDBDriver
# url: jdbc:TAOS://fastbee:6030/fastbee_log?timezone=UTC-8&charset=utf-8
# driver-class-name: com.taosdata.jdbc.ws.WebSocketDriver
# url: jdbc:TAOS-WS://fastbee:6041/fastbee_log?timezone=UTC-8&charset=utf-8
username: root
password: taosdata
dbName: fastbee_log
# influx: # 配置 influx 数据源
# enabled: false
# url: http://81.71.97.58:8086
# token: inX0k-IPfSgKg6AIfoZm6Mv0DQyQOKCkfvs5ZF3a836Yzx2Ew9QgxsHev40_2gztuMn6tofwyS6nfbT4cD-SeA==
# bucket: device_log
# org: fastbee
# measurement: device_log
# iotdb:
# enabled: false
# driver-class-name: org.apache.iotdb.jdbc.IoTDBDriver
# url: jdbc:iotdb://81.71.97.58:6667/
# username: root
# password: root
# dbName: root.ln
# druid:
# validation-query: ''
# sqlServer: # 配置 SQLServer 数据源
# type: com.alibaba.druid.pool.DruidDataSource
# driver-class-name: com.microsoft.sqlserver.jdbc.SQLServerDriver

View File

@@ -59,4 +59,16 @@ public class DeviceLogController extends BaseController
return getDataTable(list);
}
/**
 * Add a device log record.
 *
 * Thin REST endpoint: validation/permission is handled by the annotations,
 * persistence is delegated to the service layer.
 *
 * @param deviceLog the device log payload from the request body
 * @return AjaxResult indicating success or failure of the insert
 */
@ApiOperation("新增设备日志")
@PreAuthorize("@ss.hasPermi('iot:device:add')")
@Log(title = "设备日志", businessType = BusinessType.INSERT)
@PostMapping
public AjaxResult add(@RequestBody DeviceLog deviceLog)
{
// toAjax converts the service's affected-row count into a standard result
return toAjax(deviceLogService.insertDeviceLog(deviceLog));
}
}

View File

@@ -6,11 +6,13 @@ import com.fastbee.common.exception.ServiceException;
import com.fastbee.common.utils.DateUtils;
import com.fastbee.common.utils.gateway.mq.TopicsUtils;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.domain.EventLog;
import com.fastbee.common.core.thingsModel.ThingsModelSimpleItem;
import com.fastbee.common.core.thingsModel.ThingsModelValuesInput;
import com.fastbee.iot.service.IDeviceService;
import com.fastbee.iot.service.IEventLogService;
import com.fastbee.iot.tsdb.service.ILogService;
import com.fastbee.mq.model.ReportDataBo;
import com.fastbee.mq.service.IDataHandler;
import com.fastbee.mq.service.IMqttMessagePublish;
@@ -44,6 +46,8 @@ public class DataHandlerImpl implements IDataHandler {
private MqttRemoteManager remoteManager;
@Resource
private TopicsUtils topicsUtils;
@Resource
private ILogService logService;
/**
* 上报属性或功能处理
@@ -86,30 +90,32 @@ public class DataHandlerImpl implements IDataHandler {
try {
List<ThingsModelSimpleItem> thingsModelSimpleItems = JSON.parseArray(bo.getMessage(), ThingsModelSimpleItem.class);
Device device = deviceService.selectDeviceBySerialNumber(bo.getSerialNumber());
List<EventLog> results = new ArrayList<>();
List<DeviceLog> results = new ArrayList<>();
for (int i = 0; i < thingsModelSimpleItems.size(); i++) {
// 添加到设备日志
EventLog event = new EventLog();
event.setDeviceId(device.getDeviceId());
event.setDeviceName(device.getDeviceName());
event.setLogValue(thingsModelSimpleItems.get(i).getValue());
event.setRemark(thingsModelSimpleItems.get(i).getRemark());
event.setSerialNumber(device.getSerialNumber());
event.setIdentity(thingsModelSimpleItems.get(i).getId());
event.setLogType(3);
event.setIsMonitor(0);
event.setUserId(device.getUserId());
event.setUserName(device.getUserName());
event.setTenantId(device.getTenantId());
event.setTenantName(device.getTenantName());
event.setCreateTime(DateUtils.getNowDate());
DeviceLog deviceLog = new DeviceLog();
deviceLog.setDeviceId(device.getDeviceId());
deviceLog.setDeviceName(device.getDeviceName());
deviceLog.setLogValue(thingsModelSimpleItems.get(i).getValue());
deviceLog.setRemark(thingsModelSimpleItems.get(i).getRemark());
deviceLog.setSerialNumber(device.getSerialNumber());
deviceLog.setIdentify(thingsModelSimpleItems.get(i).getId());
deviceLog.setLogType(3);
deviceLog.setIsMonitor(0);
deviceLog.setUserId(device.getTenantId());
deviceLog.setUserName(device.getTenantName());
deviceLog.setTenantId(device.getTenantId());
deviceLog.setTenantName(device.getTenantName());
deviceLog.setCreateBy(device.getCreateBy());
deviceLog.setCreateTime(DateUtils.getNowDate());
// 1=影子模式2=在线模式3=其他
event.setMode(2);
results.add(event);
//eventLogService.insertEventLog(event);
deviceLog.setMode(2);
results.add(deviceLog);
}
eventLogService.insertBatch(results);
} catch (Exception e) {
for (DeviceLog deviceLog : results) {
logService.saveDeviceLog(deviceLog);
}
}catch (Exception e) {
log.error("接收事件,解析数据时异常 message={}", e.getMessage());
}
}

View File

@@ -143,6 +143,28 @@
<version>19.3.0.0</version>
</dependency>
<!-- TDengine连接 START-->
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>${tdengine.version}</version>
</dependency>
<!-- TDengine连接 END-->
<!--influxdb-->
<dependency>
<groupId>com.influxdb</groupId>
<artifactId>influxdb-client-java</artifactId>
<version>6.7.0</version>
</dependency>
<!--iotdb-->
<dependency>
<groupId>org.apache.iotdb</groupId>
<artifactId>iotdb-jdbc</artifactId>
<version>${iotdb.version}</version>
</dependency>
</dependencies>
</project>

View File

@@ -66,7 +66,7 @@ public class DeviceLog extends BaseEntity
/** 标识符 */
@ApiModelProperty("标识符")
@Excel(name = "标识符")
private String identity;
private String identify;
/** 是否监测数据1=是0=否) */
@ApiModelProperty("是否监测数据1=是0=否)")
@@ -320,14 +320,14 @@ public class DeviceLog extends BaseEntity
{
return deviceName;
}
public void setIdentity(String identity)
public void setIdentify(String identify)
{
this.identity = identity;
this.identify = identify;
}
public String getIdentity()
public String getIdentify()
{
return identity;
return identify;
}
public void setIsMonitor(Integer isMonitor)
{
@@ -347,7 +347,7 @@ public class DeviceLog extends BaseEntity
.append("logValue", getLogValue())
.append("deviceId", getDeviceId())
.append("deviceName", getDeviceName())
.append("identity", getIdentity())
.append("identify", getIdentify())
.append("createBy", getCreateBy())
.append("isMonitor", getIsMonitor())
.append("createTime", getCreateTime())

View File

@@ -23,7 +23,7 @@ public class EventLog extends BaseEntity {
/** 标识符 */
@ApiModelProperty("标识符")
@Excel(name = "标识符")
private String identity;
private String identify;
/** 物模型名称 */
@ApiModelProperty("物模型名称")
@@ -94,14 +94,14 @@ public class EventLog extends BaseEntity {
{
return logId;
}
public void setIdentity(String identity)
public void setIdentify(String identify)
{
this.identity = identity;
this.identify = identify;
}
public String getIdentity()
public String getIdentify()
{
return identity;
return identify;
}
public void setModelName(String modelName)
{
@@ -216,7 +216,7 @@ public class EventLog extends BaseEntity {
public String toString() {
return new ToStringBuilder(this, ToStringStyle.MULTI_LINE_STYLE)
.append("logId", getLogId())
.append("identity", getIdentity())
.append("identify", getIdentify())
.append("modelName", getModelName())
.append("logType", getLogType())
.append("logValue", getLogValue())

View File

@@ -3,13 +3,10 @@ package com.fastbee.iot.mapper;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.model.DeviceStatistic;
import com.fastbee.iot.model.HistoryModel;
import com.fastbee.iot.model.MonitorModel;
import com.fastbee.iot.tdengine.service.model.TdLogDto;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;
import java.util.Date;
import java.util.List;
/**
@@ -44,6 +41,13 @@ public interface DeviceLogMapper
*/
public List<MonitorModel> selectMonitorList(DeviceLog deviceLog);
/**
* 新增设备日志
*
* @param deviceLog 设备日志
* @return 结果
*/
public int insertDeviceLog(DeviceLog deviceLog);
/**
* 批量保存图片

View File

@@ -0,0 +1,38 @@
package com.fastbee.iot.mapper;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.model.MonitorModel;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * MyBatis mapper for persisting and querying device logs in Apache IoTDB.
 * The actual SQL/IoTDB statements live in the corresponding mapper XML
 * (not visible here) — verify semantics against that file.
 */
@Repository
public interface IotDbLogMapper {
// Create the IoTDB database (storage group) with the given name
void createDB(String database);
// NOTE(review): presumably counts existing databases matching the name,
// used as an "exists" check — confirm against the mapper XML
Long countDB(String database);
// Insert a single device log entry
int save(DeviceLog deviceLog);
// Delete all log entries belonging to the given device serial number
int deleteDeviceLogByDeviceNumber(@Param("serialNumber") String deviceNumber);
// Total count of property log entries for the device
Long selectPropertyLogCount(@Param("device") Device device);
// Total count of event log entries for the device
Long selectEventLogCount(@Param("device") Device device);
// Total count of monitoring log entries for the device
Long selectMonitorLogCount(@Param("device") Device device);
/***
* Monitoring data list (query conditions taken from the DeviceLog argument)
*/
List<MonitorModel> selectMonitorList(@Param("device") DeviceLog deviceLog);
/***
* Device log list (query conditions taken from the DeviceLog argument)
*/
List<DeviceLog> selectDeviceLogList(@Param("device") DeviceLog deviceLog);
// Event log list (query conditions taken from the DeviceLog argument)
List<DeviceLog> selectEventLogList(@Param("device") DeviceLog deviceLog);
}

View File

@@ -0,0 +1,84 @@
package com.fastbee.iot.mapper;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.model.HistoryModel;
import com.fastbee.iot.model.MonitorModel;
import com.fastbee.iot.tsdb.model.TdLogDto;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * MyBatis mapper for persisting and querying device logs in TDengine.
 * Every query takes the target database name explicitly, since TDengine
 * tables are addressed as {database}.{table}.
 *
 * NOTE(review): original header said package com.fastbee.mysql.mysql.tdengine /
 * class DatabaseMapper, which does not match this file — kept author/date:
 * 2022/5/16, wxy.
 */
@Repository
public interface TDDeviceLogMapper {
/***
* Create the database
*/
int createDB(String database);
/***
* Create the super table (TDengine STable)
*/
int createSTable(String database);
/***
* Insert a single device log entry
*/
int save(@Param("database") String database, @Param("device") DeviceLog deviceLog);
/**
* Batch-insert log data
*
* @param database database name
* @param data batch payload (list wrapper)
*/
int saveBatch(@Param("database") String database, @Param("data") TdLogDto data);
/***
* Total count of device property log entries
*/
Long selectPropertyLogCount(@Param("database") String database, @Param("device") Device device);
/***
* Total count of device function log entries
*/
Long selectFunctionLogCount(@Param("database") String database, @Param("device") Device device);
/***
* Total count of device event log entries
*/
Long selectEventLogCount(@Param("database") String database, @Param("device") Device device);
/***
* Total count of device monitoring log entries
*/
Long selectMonitorLogCount(@Param("database") String database, @Param("device") Device device);
/***
* Monitoring data list
*/
List<MonitorModel> selectMonitorList(@Param("database") String database, @Param("device") DeviceLog deviceLog);
/***
* Device log list
*/
List<DeviceLog> selectDeviceLogList(@Param("database") String database, @Param("device") DeviceLog deviceLog);
// Paginated event log list (MyBatis-Plus Page as first parameter)
Page<DeviceLog> selectEventLogList(Page<DeviceLog> page, @Param("database") String database, @Param("device") DeviceLog deviceLog);
/***
* Delete device logs by device serial number
*/
int deleteDeviceLogByDeviceNumber(@Param("database") String dbName, @Param("serialNumber") String serialNumber);
}

View File

@@ -16,5 +16,5 @@ public class HistoryModel {
private String value;
private String identity;
private String identify;
}

View File

@@ -18,6 +18,10 @@ public class PropertyDto
private String id;
/** 物模型名称 */
private String name;
/**
* 物模型值
*/
private String value;
/** 是否图表展示0-否1-是) */
private Integer isChart;
/** 是否历史存储0-否1-是) */

View File

@@ -32,4 +32,12 @@ public interface IDeviceLogService
*/
public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog);
/**
 * Insert a device log record.
 *
 * @param deviceLog the device log entry to persist
 * @return result code from the underlying log store (see implementation)
 */
public int insertDeviceLog(DeviceLog deviceLog);
}

View File

@@ -1,17 +1,16 @@
package com.fastbee.iot.service.impl;
import com.fastbee.common.core.domain.model.LoginUser;
import com.fastbee.common.utils.DateUtils;
import com.fastbee.common.utils.SecurityUtils;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.model.HistoryModel;
import com.fastbee.iot.tdengine.service.ILogService;
import com.fastbee.iot.mapper.DeviceLogMapper;
import com.fastbee.iot.tsdb.service.ILogService;
import com.fastbee.iot.model.MonitorModel;
import com.fastbee.iot.service.IDeviceLogService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Map;
/**
* 设备日志Service业务层处理
@@ -52,4 +51,20 @@ public class DeviceLogServiceImpl implements IDeviceLogService
}
return logService.selectDeviceLogList(deviceLog);
}
/**
 * Persist a single device log entry on behalf of the currently
 * authenticated user.
 *
 * Tenant, user and creator fields are filled from the login context and
 * the create time is stamped server-side before delegating to the
 * time-series log service.
 *
 * @param deviceLog the log entry to store
 * @return result of the save as reported by the log service
 */
@Override
public int insertDeviceLog(DeviceLog deviceLog) {
    LoginUser currentUser = SecurityUtils.getLoginUser();
    // Attribute the record to the caller's tenant and account.
    deviceLog.setTenantId(currentUser.getDeptUserId());
    deviceLog.setUserId(currentUser.getUserId());
    deviceLog.setCreateBy(currentUser.getUsername());
    // Server-side timestamp; any client-supplied create time is overwritten.
    deviceLog.setCreateTime(DateUtils.getNowDate());
    return logService.saveDeviceLog(deviceLog);
}
}

View File

@@ -67,7 +67,7 @@ public class DeviceRuntimeServiceImpl implements IDeviceRuntimeService {
log.setModelName(specs.getName());
log.setLogType(type.getCode());
log.setSpecs(JSONObject.toJSONString(specs.getDatatype()));
log.setIdentity(specs.getId());
log.setIdentify(specs.getId());
log.setSerialNumber(serialNumber);
log.setSlaveId(specs.getSlaveId());
log.setIsMonitor(specs.getIsMonitor());

View File

@@ -14,6 +14,7 @@ import com.fastbee.common.core.thingsModel.ThingsModelSimpleItem;
import com.fastbee.common.core.thingsModel.ThingsModelValuesInput;
import com.fastbee.common.enums.DataEnum;
import com.fastbee.common.enums.DeviceStatus;
import com.fastbee.common.enums.ThingsModelType;
import com.fastbee.common.exception.ServiceException;
import com.fastbee.common.utils.DateUtils;
import com.fastbee.common.utils.StringUtils;
@@ -34,7 +35,7 @@ import com.fastbee.iot.model.ThingsModels.ThingsModelValueItem;
import com.fastbee.iot.model.ThingsModels.ValueItem;
import com.fastbee.iot.service.*;
import com.fastbee.iot.service.cache.IDeviceCache;
import com.fastbee.iot.tdengine.service.ILogService;
import com.fastbee.iot.tsdb.service.ILogService;
import com.fastbee.system.service.ISysUserService;
import org.quartz.SchedulerException;
import org.slf4j.Logger;
@@ -220,6 +221,10 @@ public class DeviceServiceImpl implements IDeviceService {
String key = RedisKeyBuilder.buildTSLVCacheKey(input.getProductId(), input.getDeviceNumber());
Map<String, String> maps = new HashMap<String, String>();
List<ThingsModelSimpleItem> list = new ArrayList<>();
//属性存储集合
List<DeviceLog> deviceLogList = new ArrayList<>();
//指令存储集合
List<FunctionLog> functionLogList = new ArrayList<>();
for (ThingsModelSimpleItem item : input.getThingsModelValueRemarkItem()) {
String identity = item.getId();
Integer slaveId = input.getSlaveId() == null ? item.getSlaveId() : input.getSlaveId();
@@ -281,12 +286,82 @@ public class DeviceServiceImpl implements IDeviceService {
/* ★★★★★★★★★★★★★★★★★★★★★★ 处理数据 - 结束 ★★★★★★★★★★★★★★★★★★★★★★*/
/*★★★★★★★★★★★★★★★★★★★★★★ 存储数据 - 开始 ★★★★★★★★★★★★★★★★★★★★★★*/
if (null != dto.getIsHistory()) {
}
ThingsModelType modelType = ThingsModelType.getType(dto.getType());
Device device = this.selectDeviceBySerialNumber(serialNumber);
switch (modelType) {
case PROP:
if (1 == dto.getIsHistory()) {
DeviceLog deviceLog = new DeviceLog();
deviceLog.setSerialNumber(serialNumber);
deviceLog.setLogType(type);
// 1=影子模式2=在线模式3=其他
deviceLog.setMode(isShadow ? 1 : 2);
// 设备日志值
deviceLog.setLogValue(value);
deviceLog.setRemark(item.getRemark());
deviceLog.setIdentify(id);
deviceLog.setCreateTime(DateUtils.getNowDate());
deviceLog.setCreateBy(device.getCreateBy());
deviceLog.setUserId(device.getTenantId());
deviceLog.setUserName(device.getTenantName());
deviceLog.setTenantId(device.getTenantId());
deviceLog.setTenantName(device.getTenantName());
deviceLog.setModelName(dto.getName());
deviceLog.setIsMonitor(dto.getIsMonitor());
deviceLogList.add(deviceLog);
}
break;
case SERVICE:
if (1 == dto.getIsHistory()) {
FunctionLog function = new FunctionLog();
function.setCreateTime(DateUtils.getNowDate());
function.setFunValue(value);
function.setSerialNumber(input.getDeviceNumber());
function.setIdentify(id);
function.setShowValue(value);
// 属性获取
function.setFunType(2);
function.setUserId(device.getTenantId());
function.setCreateBy(device.getCreateBy());
function.setModelName(dto.getName());
functionLogList.add(function);
}
break;
case EVENT:
DeviceLog event = new DeviceLog();
event.setDeviceId(device.getDeviceId());
event.setDeviceName(device.getDeviceName());
event.setLogValue(value);
event.setSerialNumber(serialNumber);
event.setIdentify(id);
event.setLogType(3);
event.setIsMonitor(0);
event.setUserId(device.getTenantId());
event.setUserName(device.getTenantName());
event.setTenantId(device.getTenantId());
event.setTenantName(device.getTenantName());
event.setCreateTime(DateUtils.getNowDate());
event.setCreateBy(device.getCreateBy());
// 1=影子模式2=在线模式3=其他
event.setMode(2);
event.setModelName(dto.getName());
deviceLogList.add(event);
break;
}
list.add(item);
}
redisCache.hashPutAll(key, maps);
if (!CollectionUtils.isEmpty(functionLogList) && !isShadow) {
functionLogService.insertBatch(functionLogList);
}
if (!CollectionUtils.isEmpty(deviceLogList) && !isShadow) {
long baseTs = System.currentTimeMillis();
for (int i = 0; i < deviceLogList.size(); i++) {
// 每条间隔1毫秒避免TDengine时间冲突
deviceLogList.get(i).setTs(new Date(baseTs + i));
logService.saveDeviceLog(deviceLogList.get(i));
}
}
/* ★★★★★★★★★★★★★★★★★★★★★★ 存储数据 - 结束 ★★★★★★★★★★★★★★★★★★★★★★*/
return list;
}
@@ -891,31 +966,38 @@ public class DeviceServiceImpl implements IDeviceService {
}
}
int result = deviceMapper.updateDeviceStatus(device);
// 添加到设备日志
EventLog event = new EventLog();
event.setDeviceId(device.getDeviceId());
event.setDeviceName(device.getDeviceName());
event.setSerialNumber(device.getSerialNumber());
event.setIsMonitor(0);
event.setUserId(device.getUserId());
event.setUserName(device.getUserName());
event.setTenantId(device.getTenantId());
event.setTenantName(device.getTenantName());
event.setCreateTime(DateUtils.getNowDate());
// 日志模式 1=影子模式2=在线模式3=其他
event.setMode(3);
DeviceLog deviceLog = new DeviceLog();
deviceLog.setDeviceId(device.getDeviceId());
deviceLog.setDeviceName(device.getDeviceName());
deviceLog.setSerialNumber(device.getSerialNumber());
deviceLog.setIsMonitor(0);
deviceLog.setTenantId(device.getTenantId());
deviceLog.setUserId(device.getTenantId());
deviceLog.setUserName(device.getTenantName());
deviceLog.setTenantName(device.getTenantName());
deviceLog.setCreateTime(DateUtils.getNowDate());
deviceLog.setCreateBy(device.getCreateBy());
deviceLog.setMode(3);
if (device.getStatus() == 3) {
event.setLogValue("1");
event.setRemark("设备上线");
event.setIdentity("online");
event.setLogType(5);
deviceLog.setLogValue("1");
deviceLog.setRemark("设备上线");
deviceLog.setIdentify("online");
deviceLog.setLogType(5);
log.info("设备上线,sn{}", device.getSerialNumber());
} else if (device.getStatus() == 4) {
event.setLogValue("0");
event.setRemark("设备离线");
event.setIdentity("offline");
event.setLogType(6);
deviceLog.setLogValue("0");
deviceLog.setRemark("设备离线");
deviceLog.setIdentify("offline");
deviceLog.setLogType(6);
log.info("设备离线,sn{}", device.getSerialNumber());
} else if (device.getStatus() == 2) {
deviceLog.setLogValue("2");
deviceLog.setRemark("设备禁用");
deviceLog.setIdentify("disable");
deviceLog.setLogType(8);
log.info("设备禁用,sn{}", device.getSerialNumber());
}
eventLogMapper.insertEventLog(event);
logService.saveDeviceLog(deviceLog);
return result;
}

View File

@@ -1,63 +0,0 @@
package com.fastbee.iot.tdengine.service.impl;
import com.fastbee.common.utils.DateUtils;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.model.DeviceStatistic;
import com.fastbee.iot.model.HistoryModel;
import com.fastbee.iot.tdengine.service.ILogService;
import com.fastbee.iot.mapper.DeviceLogMapper;
import com.fastbee.iot.model.MonitorModel;
import com.fastbee.iot.tdengine.service.model.TdLogDto;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@Service
public class MySqlLogServiceImpl implements ILogService {

    /** MyBatis mapper backing every MySQL-based device-log operation. */
    private final DeviceLogMapper deviceLogMapper;

    public MySqlLogServiceImpl(DeviceLogMapper deviceLogMapper) {
        this.deviceLogMapper = deviceLogMapper;
    }

    /**
     * Removes all log rows recorded for the given device serial number.
     *
     * @param deviceNumber device serial number
     * @return number of rows deleted (mapper result)
     */
    @Override
    public int deleteDeviceLogByDeviceNumber(String deviceNumber) {
        return deviceLogMapper.deleteDeviceLogByDeviceNumber(deviceNumber);
    }

    /**
     * Totals of property / function / event / monitor logs for one device filter.
     *
     * @param device filter holder
     * @return aggregated counts
     */
    @Override
    public DeviceStatistic selectCategoryLogCount(Device device) {
        return deviceLogMapper.selectCategoryLogCount(device);
    }

    /**
     * Monitor (chart) data points matching the given log filter.
     *
     * @param deviceLog query filter
     * @return value/time pairs
     */
    @Override
    public List<MonitorModel> selectMonitorList(DeviceLog deviceLog) {
        return deviceLogMapper.selectMonitorList(deviceLog);
    }

    /**
     * Device log rows matching the given filter.
     *
     * @param deviceLog query filter
     * @return matching rows
     */
    @Override
    public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog) {
        return deviceLogMapper.selectDeviceLogList(deviceLog);
    }
}

View File

@@ -0,0 +1,75 @@
package com.fastbee.iot.tsdb.config;
import com.influxdb.client.*;
import lombok.Data;
import okhttp3.OkHttpClient;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.util.concurrent.TimeUnit;
/**
 * InfluxDB 2.x connection settings bound from
 * {@code spring.datasource.dynamic.datasource.influx}, plus the client beans
 * built from them. The beans are only registered when {@code enabled=true}.
 *
 * @author gx_ma
 * @date 2025/03/04 11:19
 */
@Data
@Configuration
@ConfigurationProperties(prefix = "spring.datasource.dynamic.datasource.influx")
public class InfluxConfig {
    // Whether InfluxDB is the active time-series store for device logs.
    private boolean enabled;
    // HTTP endpoint of the InfluxDB server, e.g. http://host:8086.
    private String url;
    // API token used for authentication.
    private String token;
    // Organization the bucket belongs to.
    private String org;
    // Bucket that device logs are written to / read from.
    private String bucket;
    // Measurement name under which device-log points are stored.
    private String measurement;
    /**
     * Shared OkHttp client for all InfluxDB HTTP traffic (singleton bean).
     *
     * @return configured {@link OkHttpClient}
     */
    @Bean
    @ConditionalOnProperty(prefix = "spring.datasource.dynamic.datasource.influx", name = "enabled", havingValue = "true")
    public OkHttpClient okHttpClient() {
        return new OkHttpClient.Builder()
                .connectTimeout(30, TimeUnit.SECONDS)
                .readTimeout(60, TimeUnit.SECONDS)
                // Keep up to 50 idle connections alive for one minute for reuse.
                .connectionPool(new okhttp3.ConnectionPool(50, 1, TimeUnit.MINUTES))
                .build();
    }
    /**
     * InfluxDB client bound to the configured url/org/bucket/token.
     *
     * @param okHttpClient shared HTTP client; its settings are copied via newBuilder()
     * @return {@link InfluxDBClient} instance
     */
    @Bean
    @ConditionalOnProperty(prefix = "spring.datasource.dynamic.datasource.influx", name = "enabled", havingValue = "true")
    public InfluxDBClient influxDBClient(OkHttpClient okHttpClient) {
        return InfluxDBClientFactory.create(
                InfluxDBClientOptions.builder()
                        .url(this.url)
                        .org(this.org)
                        .bucket(this.bucket)
                        // NOTE(review): throws NPE when enabled=true but no token is
                        // configured — confirm the property is always provided.
                        .authenticateToken(this.token.toCharArray())
                        .okHttpClient(okHttpClient.newBuilder())
                        .build()
        );
    }
    /**
     * Blocking (synchronous) write API backed by the client bean.
     *
     * @param influxDBClient InfluxDBClient instance
     * @return {@link WriteApiBlocking} instance
     */
    @Bean
    @ConditionalOnProperty(prefix = "spring.datasource.dynamic.datasource.influx", name = "enabled", havingValue = "true")
    public WriteApiBlocking writeApi(final InfluxDBClient influxDBClient) {
        return influxDBClient.getWriteApiBlocking();
    }
}

View File

@@ -0,0 +1,16 @@
package com.fastbee.iot.tsdb.config;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
/**
 * Apache IoTDB connection settings bound from
 * {@code spring.datasource.dynamic.datasource.iotdb}.
 */
@Configuration
@Data
@ConfigurationProperties(prefix = "spring.datasource.dynamic.datasource.iotdb")
public class IotDbConfig {
    // Whether IoTDB is the active time-series store for device logs.
    private boolean enabled;
    // Database (storage group) name used for device logs.
    private String dbName;
    // JDBC connection URL.
    private String url;
    // Login credentials.
    private String username;
    private String password;
}

View File

@@ -0,0 +1,22 @@
package com.fastbee.iot.tsdb.config;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
/**
 * TDengine connection settings bound from
 * {@code spring.datasource.dynamic.datasource.taos}.
 *
 * @author wxy
 * @date 2022/5/13
 */
@Configuration
@Data
@ConfigurationProperties(prefix = "spring.datasource.dynamic.datasource.taos")
public class TDengineConfig {
    // Whether TDengine is the active time-series store for device logs.
    private boolean enabled;
    // Database name; created automatically at startup if absent.
    private String dbName;
    // JDBC URL; the prefix (jdbc:TAOS / TAOS-WS / TAOS-RS) selects the driver.
    private String url;
    // Login credentials.
    private String username;
    private String password;
}

View File

@@ -0,0 +1,131 @@
package com.fastbee.iot.tsdb.init;
import com.alibaba.druid.pool.DruidDataSource;
import com.fastbee.iot.tsdb.config.InfluxConfig;
import com.fastbee.iot.tsdb.config.IotDbConfig;
import com.fastbee.iot.tsdb.config.TDengineConfig;
import com.fastbee.iot.tsdb.service.ILogService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import java.sql.Connection;
import java.sql.PreparedStatement;
/**
* 类名: ApplicationStarted
* 时间: 2022/5/18,0018 1:41
* 开发人: wxy
*/
@Slf4j
@Component
public class ApplicationStarted {
@Resource
private ILogService tsdbService;
@Resource
private TDengineConfig tDengineConfig;
@Resource
private InfluxConfig influxConfig;
@Resource
private IotDbConfig iotDbConfig;
@PostConstruct
public void run() {
//同时只能启用一个时序数据库
// 缓存配置状态以减少重复调用
boolean isTDengineEnabled = tDengineConfig.isEnabled();
boolean isInfluxEnabled = influxConfig.isEnabled();
boolean isIoTDBEnabled = iotDbConfig.isEnabled();
// 检查是否同时启用了多个时序数据库
int enabledCount = (isTDengineEnabled ? 1 : 0) + (isInfluxEnabled ? 1 : 0) + (isIoTDBEnabled ? 1 : 0);
if (enabledCount > 1) {
log.error("只能启用一个时序数据库,当前启用的数据库包括:"
+ (isTDengineEnabled ? "TDengine, " : "")
+ (isInfluxEnabled ? "Influx, " : "")
+ (isIoTDBEnabled ? "IoTDB" : ""));
return;
}
// 根据配置选择时序数据库
if (isTDengineEnabled) {
try {
initTDengine(tDengineConfig.getDbName());
log.info("使用TDengine存储设备数据初始化成功数据库名称: {}", tDengineConfig.getDbName());
} catch (Exception e) {
log.error("TDengine初始化失败数据库名称: {}, 错误信息: {}", tDengineConfig.getDbName(), e.getMessage(), e);
}
} else if (isInfluxEnabled) {
log.info("使用Influx存储设备数据初始化成功");
} else if (isIoTDBEnabled) {
initIoTDB(iotDbConfig.getDbName());
log.info("使用IoTDB存储设备数据初始化成功");
} else {
log.info("未启用任何时序数据库使用Mysql存储设备数据初始化成功");
}
}
public void initIoTDB(String dbName) {
tsdbService.createSTable(dbName);
log.info("完成IoTDB超级表的创建");
}
/**
* @return
* @Method
* @Description 开始初始化加载系统参数, 创建数据库和超级表
* @Param null
* @date 2022/5/22,0022 14:27
* @author wxy
*/
public void initTDengine(String dbName) {
try {
createDatabase();
//创建数据库表
tsdbService.createSTable(dbName);
log.info("完成超级表的创建");
} catch (Exception e) {
log.error("错误", e.getMessage());
e.printStackTrace();
}
}
/**
* @return
* @Method
* @Description 根据数据库连接自动创建数据库
* @Param null
* @date 2022/5/24,0024 14:32
* @author wxy
*/
private void createDatabase() {
try {
//去掉数据库名
String jdbcUrl = tDengineConfig.getUrl();
int startIndex = jdbcUrl.indexOf('/', 15);
int endIndex = jdbcUrl.indexOf('?');
String newJdbcUrl = jdbcUrl.substring(0, startIndex);
newJdbcUrl = newJdbcUrl + jdbcUrl.substring(endIndex);
DruidDataSource dataSource = new DruidDataSource();
dataSource.setUrl(newJdbcUrl);
dataSource.setUsername(tDengineConfig.getUsername());
dataSource.setPassword(tDengineConfig.getPassword());
if (tDengineConfig.getUrl().contains("jdbc:TAOS://")) {
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
} else if (tDengineConfig.getUrl().contains("jdbc:TAOS-WS://")) {
dataSource.setDriverClassName("com.taosdata.jdbc.ws.WebSocketDriver");
} else if(tDengineConfig.getUrl().contains("jdbc:TAOS-RS://")) {
dataSource.setDriverClassName("com.taosdata.jdbc.rs.RestfulDriver");
}
Connection conn = dataSource.getConnection();
PreparedStatement ps = conn.prepareStatement(String.format("create database if not exists %s;", tDengineConfig.getDbName()));
boolean resultS = ps.execute();
log.info("完成数据库创建:{}",resultS);
} catch (Exception e) {
log.info("错误", e.getMessage());
e.printStackTrace();
}
}
}

View File

@@ -1,4 +1,4 @@
package com.fastbee.iot.tdengine.service.model;
package com.fastbee.iot.tsdb.model;
import com.fastbee.iot.domain.DeviceLog;
import lombok.AllArgsConstructor;

View File

@@ -1,17 +1,13 @@
package com.fastbee.iot.tdengine.service;
package com.fastbee.iot.tsdb.service;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.model.DeviceStatistic;
import com.fastbee.iot.model.HistoryModel;
import com.fastbee.iot.model.MonitorModel;
import com.fastbee.iot.tdengine.service.model.TdLogDto;
import org.springframework.stereotype.Service;
import com.fastbee.iot.tsdb.model.TdLogDto;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
* @package iot.iot.log
@@ -22,6 +18,16 @@ import java.util.Map;
*/
public interface ILogService {
int createSTable(String database);
/** 保存设备日志 **/
int saveDeviceLog(DeviceLog deviceLog);
/**
* 批量保存日志
*/
int saveBatch(TdLogDto dto);
/** 根据设备编号删除设备日志 **/
int deleteDeviceLogByDeviceNumber(String deviceNumber);

View File

@@ -0,0 +1,514 @@
package com.fastbee.iot.tsdb.service.impl;
import com.baomidou.dynamic.datasource.annotation.DS;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.model.DeviceStatistic;
import com.fastbee.iot.model.HistoryModel;
import com.fastbee.iot.model.MonitorModel;
import com.fastbee.iot.tsdb.config.InfluxConfig;
import com.fastbee.iot.tsdb.service.ILogService;
import com.fastbee.iot.tsdb.model.TdLogDto;
import com.fastbee.iot.util.SnowflakeIdWorker;
import com.influxdb.client.InfluxDBClient;
import com.influxdb.client.QueryApi;
import com.influxdb.client.WriteApiBlocking;
import com.influxdb.client.domain.WritePrecision;
import com.influxdb.client.write.Point;
import com.influxdb.query.FluxRecord;
import com.influxdb.query.FluxTable;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Primary;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.*;
import java.util.stream.Collectors;
/**
* @Author gx_ma
* @Date: 2025/03/04/ 11:16
* @description
*/
@Slf4j
@Primary
@ConditionalOnProperty(name = "spring.datasource.dynamic.datasource.influx.enabled", havingValue = "true")
@DS("influx")
@Service("Influx")
public class InfluxLogService implements ILogService {
@Resource
private InfluxConfig influxConfig;
@Resource
private InfluxDBClient influxDBClient;
@Resource
private WriteApiBlocking writeApi;
private SnowflakeIdWorker snowflakeIdWorker = new SnowflakeIdWorker(1);
/**
 * No-op for InfluxDB: measurements are created implicitly on first write,
 * so there is no super table to prepare.
 *
 * @param database ignored
 * @return always 0
 */
@Override
public int createSTable(String database) {
    return 0;
}
/**
 * Writes one device log as a single point: serialNumber is the (indexed) tag,
 * every other attribute is stored as a field on the configured measurement.
 * A snowflake id is assigned so the entry can be referenced outside Influx.
 *
 * @param deviceLog entry to persist; createTime must be non-null (used as the
 *                  point timestamp at nanosecond precision)
 * @return always 1 (one point written), mirroring the SQL implementations
 */
@Override
public int saveDeviceLog(DeviceLog deviceLog) {
    long logId = snowflakeIdWorker.nextId();
    deviceLog.setLogId(logId);
    // NOTE(review): fields whose value is null are presumably skipped by the
    // client's Point builder — confirm against influxdb-client-java docs.
    Point point = Point.measurement(influxConfig.getMeasurement())
            .addTag("serialNumber", deviceLog.getSerialNumber())
            .addField("logId", deviceLog.getLogId())
            .addField("logType", deviceLog.getLogType())
            .addField("logValue", deviceLog.getLogValue())
            .addField("deviceId", deviceLog.getDeviceId())
            .addField("deviceName", deviceLog.getDeviceName())
            .addField("identify", deviceLog.getIdentify())
            .addField("createBy", deviceLog.getCreateBy())
            .addField("isMonitor", deviceLog.getIsMonitor())
            .addField("mode", deviceLog.getMode())
            .addField("remark", deviceLog.getRemark())
            .addField("userId", deviceLog.getUserId())
            .addField("userName", deviceLog.getUserName())
            .addField("tenantId", deviceLog.getTenantId())
            .addField("tenantName", deviceLog.getTenantName())
            .addField("modelName", deviceLog.getModelName())
            .time(deviceLog.getCreateTime().toInstant(), WritePrecision.NS);
    writeApi.writePoint(influxConfig.getBucket(), influxConfig.getOrg(), point);
    return 1;
}
/**
 * Persists every log in the batch one point at a time and reports how many
 * writes completed.
 *
 * @param dto batch wrapper holding the log entries
 * @return number of points written
 */
@Override
public int saveBatch(TdLogDto dto) {
    return dto.getList().stream().mapToInt(this::saveDeviceLog).sum();
}
/**
 * Deletes every log point recorded for the given device serial number.
 *
 * Fixes two defects in the previous version: (1) the "count" was the character
 * length of a raw CSV response, never a record count; (2) deletion was issued
 * as a Flux "schema.delete" script, but Flux has no delete function — InfluxDB
 * 2.x deletes via the /api/v2/delete endpoint, exposed here as DeleteApi.
 *
 * @param deviceNumber device serial number (stored as the serialNumber tag)
 * @return number of matching values found before deletion; 0 when nothing
 *         matched or the delete call failed
 */
@Override
public int deleteDeviceLogByDeviceNumber(String deviceNumber) {
    QueryApi queryApi = influxDBClient.getQueryApi();
    // Count matching rows with a real Flux count() aggregation.
    String countQuery = String.format(
            "from(bucket: \"%s\")\n" +
                    " |> range(start: 0)\n" +
                    " |> filter(fn: (r) => r._measurement == \"%s\")\n" +
                    " |> filter(fn: (r) => r.serialNumber == \"%s\")\n" +
                    " |> group()\n" +
                    " |> count()",
            influxConfig.getBucket(),
            influxConfig.getMeasurement(),
            deviceNumber
    );
    long count = 0L;
    List<FluxTable> tables = queryApi.query(countQuery, influxConfig.getOrg());
    if (!tables.isEmpty() && !tables.get(0).getRecords().isEmpty()) {
        Object value = tables.get(0).getRecords().get(0).getValue();
        if (value instanceof Number) {
            // NOTE(review): this counts stored field values, not distinct log
            // rows — confirm whether callers need a per-row count.
            count = ((Number) value).longValue();
        }
    }
    if (count > 0) {
        try {
            // Delete predicates use '=' (not '==') and may only reference tags
            // and _measurement; serialNumber is a tag, so this is valid.
            influxDBClient.getDeleteApi().delete(
                    OffsetDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC),
                    OffsetDateTime.now(ZoneOffset.UTC),
                    String.format("_measurement=\"%s\" AND serialNumber=\"%s\"",
                            influxConfig.getMeasurement(), deviceNumber),
                    influxConfig.getBucket(),
                    influxConfig.getOrg()
            );
        } catch (Exception e) {
            log.error("Failed to delete logs for device: {}", deviceNumber, e);
            return 0;
        }
    }
    return (int) count;
}
/**
 * Aggregates property / event / monitor log totals for one device filter,
 * normalizing absent (null) counts to zero.
 *
 * @param device filter holder (tenantId / createBy may be null)
 * @return aggregated statistics, never null
 */
@Override
public DeviceStatistic selectCategoryLogCount(Device device) {
    Long propertyCount = selectPropertyLogCount(device);
    Long eventCount = selectEventLogCount(device);
    Long monitorCount = selectMonitorLogCount(device);
    DeviceStatistic statistic = new DeviceStatistic();
    statistic.setPropertyCount(propertyCount != null ? propertyCount : 0);
    statistic.setEventCount(eventCount != null ? eventCount : 0);
    statistic.setMonitorCount(monitorCount != null ? monitorCount : 0);
    return statistic;
}
/**
 * Counts monitor logs (logType == 1 and isMonitor == 1), optionally scoped by
 * tenant id and creator. Replaces stdout debugging with the class logger and
 * reads the count value null-safely in one access.
 *
 * @param device filter holder (tenantId / createBy may be null)
 * @return total count, never null
 */
private Long selectMonitorLogCount(Device device) {
    QueryApi queryApi = influxDBClient.getQueryApi();
    // Pivot fields into columns so logType/isMonitor are filterable per row.
    StringBuilder fluxQuery = new StringBuilder();
    fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\")\n")
            .append(" |> range(start: 0)\n")
            .append(" |> filter(fn: (r) => r[\"_measurement\"] == \"").append(influxConfig.getMeasurement()).append("\")\n")
            .append(" |> pivot(rowKey: [\"_time\"], columnKey: [\"_field\"], valueColumn: \"_value\") ")
            .append(" |> filter(fn: (r) => r[\"logType\"] == 1 and r[\"isMonitor\"] == 1)");
    if (device.getTenantId() != null) {
        fluxQuery.append(" |> filter(fn: (r) => r[\"tenantId\"] == ").append(device.getTenantId()).append(")");
    }
    if (!Objects.isNull(device.getCreateBy())) {
        fluxQuery.append(" |> filter(fn: (r) => r[\"createBy\"] == \"").append(device.getCreateBy()).append("\")");
    }
    fluxQuery.append(" |> group()").append(" |> count(column: \"mode\")");
    // was System.out.println — route through the logging framework instead.
    log.debug("Monitor count Flux query: {}", fluxQuery);
    List<FluxTable> tables = queryApi.query(fluxQuery.toString());
    if (!tables.isEmpty() && !tables.get(0).getRecords().isEmpty()) {
        Object counted = tables.get(0).getRecords().get(0).getValueByKey("mode");
        return counted instanceof Number ? ((Number) counted).longValue() : 0L;
    }
    return 0L;
}
/**
 * Counts event logs (logType == 3), optionally scoped by tenant id and
 * creator. Replaces stdout debugging with the class logger and reads the
 * count value null-safely in one access.
 *
 * @param device filter holder (tenantId / createBy may be null)
 * @return total count, never null
 */
private Long selectEventLogCount(Device device) {
    QueryApi queryApi = influxDBClient.getQueryApi();
    StringBuilder fluxQuery = new StringBuilder();
    fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\")\n")
            .append(" |> range(start: 0)\n")
            .append(" |> filter(fn: (r) => r[\"_measurement\"] == \"").append(influxConfig.getMeasurement()).append("\")\n")
            .append(" |> pivot(rowKey: [\"_time\"], columnKey: [\"_field\"], valueColumn: \"_value\") ")
            .append(" |> filter(fn: (r) => r[\"logType\"] == 3)");
    if (device.getTenantId() != null) {
        fluxQuery.append(" |> filter(fn: (r) => r[\"tenantId\"] == ").append(device.getTenantId()).append(")");
    }
    if (!Objects.isNull(device.getCreateBy())) {
        fluxQuery.append(" |> filter(fn: (r) => r[\"createBy\"] == \"").append(device.getCreateBy()).append("\")");
    }
    fluxQuery.append("|> group()").append("|> count(column: \"mode\")\n");
    // was System.out.println — route through the logging framework instead.
    log.debug("Event count Flux query: {}", fluxQuery);
    List<FluxTable> tables = queryApi.query(fluxQuery.toString());
    if (!tables.isEmpty() && !tables.get(0).getRecords().isEmpty()) {
        Object counted = tables.get(0).getRecords().get(0).getValueByKey("mode");
        return counted instanceof Number ? ((Number) counted).longValue() : 0L;
    }
    return 0L;
}
/**
 * Counts property logs (logType == 1, monitor or not), optionally scoped by
 * tenant id and creator. Replaces stdout debugging with the class logger and
 * reads the count value null-safely in one access.
 *
 * @param device filter holder (tenantId / createBy may be null)
 * @return total count, never null
 */
private Long selectPropertyLogCount(Device device) {
    QueryApi queryApi = influxDBClient.getQueryApi();
    StringBuilder fluxQuery = new StringBuilder();
    fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\")\n")
            .append(" |> range(start: 0)\n")
            .append(" |> filter(fn: (r) => r[\"_measurement\"] == \"").append(influxConfig.getMeasurement()).append("\")\n")
            .append(" |> pivot(rowKey: [\"_time\"], columnKey: [\"_field\"], valueColumn: \"_value\") ")
            .append(" |> filter(fn: (r) => r[\"logType\"] == 1)\n");
    if (device.getTenantId() != null) {
        fluxQuery.append(" |> filter(fn: (r) => r[\"tenantId\"] == ").append(device.getTenantId()).append(")");
    }
    if (!Objects.isNull(device.getCreateBy())) {
        fluxQuery.append(" |> filter(fn: (r) => r[\"createBy\"] == \"").append(device.getCreateBy()).append("\")");
    }
    fluxQuery.append("|> group()").append(" |> count(column: \"mode\")\n");
    // was System.out.println — route through the logging framework instead.
    log.debug("Property count Flux query: {}", fluxQuery);
    List<FluxTable> tables = queryApi.query(fluxQuery.toString());
    if (!tables.isEmpty() && !tables.get(0).getRecords().isEmpty()) {
        Object counted = tables.get(0).getRecords().get(0).getValueByKey("mode");
        return counted instanceof Number ? ((Number) counted).longValue() : 0L;
    }
    return 0L;
}
/**
 * Pages through device logs, newest first. Filters on isMonitor, serialNumber,
 * logType (default: everything except type 7) and a contains-match on identify.
 *
 * @param deviceLog query holder; pageNum (1-based) and pageSize must be set —
 *                  NOTE(review): both are unboxed below, so null values would
 *                  throw NPE; confirm callers always populate them
 * @return one page of logs mapped back to {@link DeviceLog}
 */
@Override
public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog) {
    QueryApi queryApi = influxDBClient.getQueryApi();
    StringBuilder fluxQuery = new StringBuilder();
    // Unbounded range over the configured measurement.
    fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\") ")
            .append("|> range(start: 0) ")
            .append("|> filter(fn: (r) => r._measurement == \"").append(influxConfig.getMeasurement()).append("\") ");
    // Pivot fields into columns so each record row carries the full log entry.
    fluxQuery.append("|> pivot(\n" +
            " rowKey:[\"_time\"], \n" +
            " columnKey: [\"_field\"], \n" +
            " valueColumn: \"_value\"\n" +
            " )");
    // Newest first, and merge all series into a single table for paging.
    fluxQuery.append("|> sort(columns: [\"_time\"], desc: true)")
            .append("|> group()");
    List<String> filterConditions = new ArrayList<>();
    if (deviceLog.getIsMonitor() != null) {
        filterConditions.add("r.isMonitor == " + deviceLog.getIsMonitor());
    }
    if (deviceLog.getSerialNumber() != null && !deviceLog.getSerialNumber().isEmpty()) {
        filterConditions.add("r.serialNumber == \"" + deviceLog.getSerialNumber() + "\"");
    }
    if (deviceLog.getLogType() != null) {
        filterConditions.add("r.logType == " + deviceLog.getLogType());
    } else {
        // No explicit type requested: exclude only type 7.
        filterConditions.add("r.logType != 7");
    }
    if (deviceLog.getIdentify() != null && !deviceLog.getIdentify().isEmpty()) {
        // Substring match via regex on the identify column.
        filterConditions.add("r.identify =~ /.*" + deviceLog.getIdentify() + ".*/");
    }
    // Combine all conditions into a single AND filter.
    fluxQuery.append("|> filter(fn: (r) => ");
    for (int i = 0; i < filterConditions.size(); i++) {
        if (i > 0) {
            fluxQuery.append(" and ");
        }
        fluxQuery.append(filterConditions.get(i));
    }
    fluxQuery.append(") ");
    // 计算偏移量
    int pageNum = deviceLog.getPageNum();
    int pageSize = deviceLog.getPageSize();
    int offset = (pageNum - 1) * pageSize;
    // 添加分页查询
    StringBuilder originalQuery = new StringBuilder(fluxQuery);
    originalQuery.append("|> limit(n: ").append(pageSize).append(", offset: ").append(offset).append(")");
    List<FluxTable> tables = queryApi.query(originalQuery.toString());
    List<DeviceLog> deviceLogList = new ArrayList<>();
    for (FluxTable table : tables) {
        for (FluxRecord record : table.getRecords()) {
            DeviceLog log = new DeviceLog();
            setDeviceLog(deviceLogList, record, log);
        }
    }
    return deviceLogList;
    // 注意:由于使用了 limit 和 offset这里无法直接获取总记录数需要额外查询
    // List<FluxTable> countTables = queryApi.query(fluxQuery.toString());
    // long total = 0;
    // if (!countTables.isEmpty() && !countTables.get(0).getRecords().isEmpty()) {
    // total = countTables.get(0).getRecords().size();
    // }
    // // 创建 MyBatis-Plus 的 Page 对象
    // Page<DeviceLog> page = new Page<>(deviceLog.getPageNum(), deviceLog.getPageSize());
    // page.setRecords(deviceLogList);
    // page.setTotal(total);
    // return page;
}
/**
 * Maps one pivoted Flux record back onto a DeviceLog and appends it to the
 * output list.
 *
 * NOTE(review): logType, isMonitor and mode are wrapped in requireNonNull —
 * a record missing any of those columns throws NPE, while the remaining
 * columns tolerate null. Confirm every written point carries all three.
 *
 * @param deviceLogList accumulator the populated entry is appended to
 * @param record        pivoted Flux record (one column per former field)
 * @param log           fresh DeviceLog instance to populate
 */
private void setDeviceLog(List<DeviceLog> deviceLogList, FluxRecord record, DeviceLog log) {
    log.setLogId((Long) record.getValueByKey("logId"));
    log.setLogType(((Number) Objects.requireNonNull(record.getValueByKey("logType"))).intValue());
    log.setLogValue((String) record.getValueByKey("logValue"));
    log.setDeviceId((Long) record.getValueByKey("deviceId"));
    log.setDeviceName((String) record.getValueByKey("deviceName"));
    log.setSerialNumber((String) record.getValueByKey("serialNumber"));
    log.setIdentify((String) record.getValueByKey("identify"));
    log.setCreateBy((String) record.getValueByKey("createBy"));
    log.setIsMonitor(((Number) Objects.requireNonNull(record.getValueByKey("isMonitor"))).intValue());
    log.setMode(((Number) Objects.requireNonNull(record.getValueByKey("mode"))).intValue());
    // Point timestamp becomes the log's createTime.
    log.setCreateTime(Date.from(Objects.requireNonNull(record.getTime())));
    log.setRemark((String) record.getValueByKey("remark"));
    log.setUserId((Long) record.getValueByKey("userId"));
    log.setUserName((String) record.getValueByKey("userName"));
    log.setTenantId((Long) record.getValueByKey("tenantId"));
    log.setTenantName((String) record.getValueByKey("tenantName"));
    log.setModelName((String) record.getValueByKey("modelName"));
    deviceLogList.add(log);
}
// @Override
// public Page<DeviceLog> selectEventLogList(DeviceLog deviceLog) {
// //事件日志的时间筛选时间范围放在param参数中格式yyyy-MM-dd需要自行封装 HH:mm:ss
// if (deviceLog.getParams().get("beginTime") != null && deviceLog.getParams().get("beginTime") != "" && deviceLog.getParams().get("endTime") != null && deviceLog.getParams().get("endTime") != "") {
// String beginTime = deviceLog.getParams().get("beginTime").toString();
// String endTime = deviceLog.getParams().get("endTime").toString();
// beginTime = beginTime + " 00:00:00";
// endTime = endTime + " 23:59:59";
// deviceLog.setBeginTime(beginTime);
// deviceLog.setEndTime(endTime);
// }
// QueryApi queryApi = influxDBClient.getQueryApi();
//
// StringBuilder fluxQuery = new StringBuilder();
// fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\") ");
//
// // 处理时间范围
// if (deviceLog.getBeginTime() != null && !deviceLog.getBeginTime().isEmpty()
// && deviceLog.getEndTime() != null && !deviceLog.getEndTime().isEmpty()) {
// SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
// try {
// Date beginDate = sdf.parse(deviceLog.getBeginTime());
// Date endDate = sdf.parse(deviceLog.getEndTime());
// // 转换为RFC3339格式时间字符串
// String startRFC3339 = beginDate.toInstant().toString();
// String stopRFC3339 = endDate.toInstant().toString();
//
// fluxQuery.append("|> range(start: ")
// .append(startRFC3339)
// .append(", stop: ")
// .append(stopRFC3339)
// .append(") ");
// } catch (ParseException e) {
// e.printStackTrace();
// // 若解析失败,可使用默认时间范围
// fluxQuery.append("|> range(start: 0) ");
// }
// } else {
// fluxQuery.append("|> range(start: 0) ");
// }
//
// fluxQuery.append("|> filter(fn: (r) => r._measurement == \"").append(influxConfig.getMeasurement()).append("\") ");
//
// // 原始查询添加 pivot 和分页操作
// fluxQuery.append("|> pivot(\n" +
// " rowKey:[\"_time\"], \n" +
// " columnKey: [\"_field\"], \n" +
// " valueColumn: \"_value\"\n" +
// " )");
//
// List<String> filterConditions = new ArrayList<>();
// if (deviceLog.getIsMonitor() != null) {
// filterConditions.add("r.isMonitor == " + deviceLog.getIsMonitor());
// }
// if (deviceLog.getLogType() != null) {
// filterConditions.add("r.logType == " + deviceLog.getLogType());
// } else {
// filterConditions.add("r.logType != 1 and r.logType != 2 and r.logType != 4 and r.logType != 7");
// }
// if (deviceLog.getSerialNumber() != null && !deviceLog.getSerialNumber().isEmpty()) {
// filterConditions.add("r.serialNumber == \"" + deviceLog.getSerialNumber() + "\"");
// }
// if (deviceLog.getIdentify() != null && !deviceLog.getIdentify().isEmpty()) {
// filterConditions.add("r.identify =~ /.*" + deviceLog.getIdentify() + ".*/");
// }
//
// fluxQuery.append("|> filter(fn: (r) => ");
// for (int i = 0; i < filterConditions.size(); i++) {
// if (i > 0) {
// fluxQuery.append(" and ");
// }
// fluxQuery.append(filterConditions.get(i));
// }
// fluxQuery.append(") ");
// fluxQuery.append("|> sort(columns: [\"_time\"], desc: true)")
// .append("|> group()");
//
// // 计算偏移量
// int pageNum = deviceLog.getPageNum();
// int pageSize = deviceLog.getPageSize();
// int offset = (pageNum - 1) * pageSize;
// // 添加分页查询
// StringBuilder originalQuery = new StringBuilder(fluxQuery);
// originalQuery.append("|> limit(n: ").append(pageSize).append(", offset: ").append(offset).append(")");
//
// List<FluxTable> tables = queryApi.query(originalQuery.toString());
// System.out.println("EventList查询Flux语句" + originalQuery);
//
// List<DeviceLog> deviceLogList = new ArrayList<>();
// for (FluxTable table : tables) {
// for (FluxRecord record : table.getRecords()) {
// DeviceLog log = new DeviceLog();
// setDeviceLog(deviceLogList, record, log);
// }
// }
//
// // 注意:由于使用了 limit 和 offset这里无法直接获取总记录数需要额外查询
// List<FluxTable> countTables = queryApi.query(fluxQuery.toString());
// System.out.println("分页查询Flux语句" + fluxQuery);
// long total = 0;
// if (!countTables.isEmpty() && !countTables.get(0).getRecords().isEmpty()) {
// total = (long) countTables.get(0).getRecords().size();
// }
//
// Page<DeviceLog> page = new Page<>(deviceLog.getPageNum(), deviceLog.getPageSize());
// page.setRecords(deviceLogList);
// page.setTotal(total);
// return page;
// }
/**
 * Returns monitor data points (value/time pairs) for charting, newest first,
 * optionally restricted to a time window, a serial number and an identify
 * substring.
 *
 * Fixes: timestamps are converted with full precision (the previous
 * epoch-second arithmetic truncated sub-second components, although points
 * are written at nanosecond precision), and parse failures are logged via the
 * class logger instead of printStackTrace while keeping the full-range
 * fallback.
 *
 * NOTE(review): after pivot() the _value column no longer exists, yet the
 * query keeps ["_value", "_time"] — verify against a live bucket that values
 * actually come back non-null here.
 *
 * @param deviceLog filter holder; beginTime/endTime in "yyyy-MM-dd HH:mm:ss"
 * @return matching monitor points
 */
@Override
public List<MonitorModel> selectMonitorList(DeviceLog deviceLog) {
    QueryApi queryApi = influxDBClient.getQueryApi();
    StringBuilder fluxQuery = new StringBuilder();
    fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\") ");
    // 处理时间范围
    if (deviceLog.getBeginTime() != null && !deviceLog.getBeginTime().isEmpty()
            && deviceLog.getEndTime() != null && !deviceLog.getEndTime().isEmpty()) {
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        try {
            Date beginDate = sdf.parse(deviceLog.getBeginTime());
            Date endDate = sdf.parse(deviceLog.getEndTime());
            // 转换为RFC3339格式时间字符串
            String startRFC3339 = beginDate.toInstant().toString();
            String stopRFC3339 = endDate.toInstant().toString();
            fluxQuery.append("|> range(start: ")
                    .append(startRFC3339)
                    .append(", stop: ")
                    .append(stopRFC3339)
                    .append(") ");
        } catch (ParseException e) {
            // Fall back to an unbounded range; log instead of printStackTrace.
            log.warn("Invalid monitor time range [{} - {}], falling back to full range",
                    deviceLog.getBeginTime(), deviceLog.getEndTime(), e);
            fluxQuery.append("|> range(start: 0) ");
        }
    } else {
        fluxQuery.append("|> range(start: 0) ");
    }
    fluxQuery.append("|> filter(fn: (r) => r._measurement == \"").append(influxConfig.getMeasurement()).append("\") ");
    fluxQuery.append("|> pivot(\n" +
            " rowKey:[\"_time\"], \n" +
            " columnKey: [\"_field\"], \n" +
            " valueColumn: \"_value\"\n" +
            " )");
    // Monitor points only.
    fluxQuery.append("|> filter(fn: (r) => r.isMonitor == 1) ");
    List<String> filterConditions = new ArrayList<>();
    if (deviceLog.getSerialNumber() != null && !deviceLog.getSerialNumber().isEmpty()) {
        filterConditions.add("r.serialNumber == \"" + deviceLog.getSerialNumber() + "\"");
    }
    if (deviceLog.getIdentify() != null && !deviceLog.getIdentify().isEmpty()) {
        filterConditions.add("r.identify =~ /.*" + deviceLog.getIdentify() + ".*/");
    }
    if (!filterConditions.isEmpty()) {
        fluxQuery.append("|> filter(fn: (r) => ");
        for (int i = 0; i < filterConditions.size(); i++) {
            if (i > 0) {
                fluxQuery.append(" and ");
            }
            fluxQuery.append(filterConditions.get(i));
        }
        fluxQuery.append(") ");
    }
    fluxQuery.append("|> sort(columns: [\"_time\"], desc: true) ");
    fluxQuery.append("|> keep(columns: [\"_value\", \"_time\"]) ");
    List<FluxTable> tables = queryApi.query(fluxQuery.toString());
    List<MonitorModel> monitorList = new ArrayList<>();
    for (FluxTable table : tables) {
        for (FluxRecord record : table.getRecords()) {
            MonitorModel model = new MonitorModel();
            model.setValue((String) record.getValue());
            // Keep full timestamp precision instead of epoch-second * 1000.
            model.setTime(Date.from(Objects.requireNonNull(record.getTime())));
            monitorList.add(model);
        }
    }
    return monitorList;
}
}

View File

@@ -0,0 +1,120 @@
package com.fastbee.iot.tsdb.service.impl;
import com.baomidou.dynamic.datasource.annotation.DS;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.mapper.IotDbLogMapper;
import com.fastbee.iot.model.DeviceStatistic;
import com.fastbee.iot.model.HistoryModel;
import com.fastbee.iot.model.MonitorModel;
import com.fastbee.iot.tsdb.service.ILogService;
import com.fastbee.iot.tsdb.model.TdLogDto;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Primary;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * Apache IoTDB implementation of {@link ILogService}.
 * Only active when {@code spring.datasource.dynamic.datasource.iotdb.enabled=true};
 * every statement runs against the "iotdb" dynamic datasource.
 */
@Slf4j
@Primary
@ConditionalOnProperty(name = "spring.datasource.dynamic.datasource.iotdb.enabled", havingValue = "true")
@DS("iotdb")
@Service("IotDB")
public class IotDbLogService implements ILogService {

    @Resource
    private IotDbLogMapper iotDbLogMapper;

    /**
     * Creates the IoTDB database when it does not exist yet.
     *
     * @param database database (storage group) name
     * @return always 1; the value only signals the check ran
     */
    @Override
    public int createSTable(String database) {
        Long count = iotDbLogMapper.countDB(database);
        // Null-safe unboxing: a missing count (mapper returning no row) used to
        // throw an NPE on "count == 0"; treat it like "database absent" and
        // attempt creation instead.
        if (count == null || count == 0) {
            iotDbLogMapper.createDB(database);
        }
        return 1;
    }

    /**
     * Persists a single device log entry.
     *
     * @return affected row count reported by the mapper
     */
    @Override
    public int saveDeviceLog(DeviceLog deviceLog) {
        return iotDbLogMapper.save(deviceLog);
    }

    /**
     * Saves a batch of logs by inserting them one at a time — the IoTDB
     * mapper exposes no multi-row insert.
     *
     * @return total number of rows written
     */
    @Override
    public int saveBatch(TdLogDto dto) {
        int ret = 0;
        for (DeviceLog deviceLog : dto.getList()) {
            ret += this.saveDeviceLog(deviceLog);
        }
        return ret;
    }

    /** Deletes the logs associated with the given device number. */
    @Override
    public int deleteDeviceLogByDeviceNumber(String deviceNumber) {
        return iotDbLogMapper.deleteDeviceLogByDeviceNumber(deviceNumber);
    }

    /**
     * Aggregates property / event / monitor log counts for a device owner;
     * null counts from the mapper are normalized to 0.
     */
    @Override
    public DeviceStatistic selectCategoryLogCount(Device device) {
        DeviceStatistic statistic = new DeviceStatistic();
        Long property = iotDbLogMapper.selectPropertyLogCount(device);
        Long event = iotDbLogMapper.selectEventLogCount(device);
        Long monitor = iotDbLogMapper.selectMonitorLogCount(device);
        statistic.setPropertyCount(property == null ? 0 : property);
        statistic.setEventCount(event == null ? 0 : event);
        statistic.setMonitorCount(monitor == null ? 0 : monitor);
        return statistic;
    }

    /**
     * Lists device logs. A non-null identify filter is wrapped in '%' so the
     * mapper's LIKE performs a contains-match.
     */
    @Override
    public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog) {
        if (deviceLog.getIdentify() != null) {
            deviceLog.setIdentify("%" + deviceLog.getIdentify() + "%");
        }
        return iotDbLogMapper.selectDeviceLogList(deviceLog);
    }

    // Disabled paged event-log query kept for reference:
    // @Override
    // public Page<DeviceLog> selectEventLogList(DeviceLog deviceLog) {
    //     if (deviceLog.getParams().get("beginTime") != null && deviceLog.getParams().get("beginTime") != "" && deviceLog.getParams().get("endTime") != null && deviceLog.getParams().get("endTime") != "") {
    //         String beginTime = deviceLog.getParams().get("beginTime").toString();
    //         String endTime = deviceLog.getParams().get("endTime").toString();
    //         beginTime = beginTime + " 00:00:00";
    //         endTime = endTime + " 23:59:59";
    //         deviceLog.setBeginTime(beginTime);
    //         deviceLog.setEndTime(endTime);
    //     }
    //     if (deviceLog.getIdentify() != null) {
    //         deviceLog.setIdentify("%" + deviceLog.getIdentify() + "%");
    //     }
    //     List<DeviceLog> allLogs = iotDbLogMapper.selectEventLogList(deviceLog);
    //     int pageSize = deviceLog.getPageSize();
    //     int pageNum = deviceLog.getPageNum();
    //     int start = (pageNum - 1) * pageSize;
    //     int end = Math.min(start + pageSize, allLogs.size());
    //     Page<DeviceLog> page = new Page<>(pageNum, pageSize);
    //     page.setRecords(allLogs.subList(start, end));
    //     page.setTotal(allLogs.size());
    //     return page;
    // }

    /**
     * Monitor-data list; identify is wrapped in '%' for a contains LIKE match.
     */
    @Override
    public List<MonitorModel> selectMonitorList(DeviceLog deviceLog) {
        if (deviceLog.getIdentify() != null) {
            deviceLog.setIdentify("%" + deviceLog.getIdentify() + "%");
        }
        return iotDbLogMapper.selectMonitorList(deviceLog);
    }
}

View File

@@ -0,0 +1,109 @@
package com.fastbee.iot.tsdb.service.impl;
import com.fastbee.common.utils.DateUtils;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.domain.EventLog;
import com.fastbee.iot.mapper.EventLogMapper;
import com.fastbee.iot.model.DeviceStatistic;
import com.fastbee.iot.tsdb.model.TdLogDto;
import com.fastbee.iot.tsdb.service.ILogService;
import com.fastbee.iot.mapper.DeviceLogMapper;
import com.fastbee.iot.model.MonitorModel;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.List;

/**
 * Default MySQL implementation of {@link ILogService}: event-style logs
 * (types 3/5/6/8) are written to iot_event_log, everything else to
 * iot_device_log.
 */
@Service
public class MySqlLogServiceImpl implements ILogService {

    private DeviceLogMapper deviceLogMapper;

    @Resource
    private EventLogMapper eventLogMapper;

    public MySqlLogServiceImpl(DeviceLogMapper _deviceLogMapper){
        this.deviceLogMapper=_deviceLogMapper;
    }

    /** MySQL has no super-table concept; tables come from schema migration. */
    @Override
    public int createSTable(String database) {
        return 0;
    }

    /**
     * Persists a device log. Log types 3, 5, 6 and 8 are event-style records
     * and are converted into an {@link EventLog} row; any other type goes to
     * iot_device_log unchanged.
     *
     * @return affected row count
     */
    @Override
    public int saveDeviceLog(DeviceLog deviceLog) {
        Integer logType = deviceLog.getLogType();
        // Null-safe: a missing log type falls through to the plain device-log
        // insert instead of failing on unboxing.
        if (logType != null && (logType == 3 || logType == 5 || logType == 6 || logType == 8)) {
            return eventLogMapper.insertEventLog(toEventLog(deviceLog));
        }
        return deviceLogMapper.insertDeviceLog(deviceLog);
    }

    /** Copies the event-relevant fields of a DeviceLog onto a new EventLog. */
    private EventLog toEventLog(DeviceLog deviceLog) {
        EventLog event = new EventLog();
        event.setDeviceId(deviceLog.getDeviceId());
        event.setDeviceName(deviceLog.getDeviceName());
        event.setSerialNumber(deviceLog.getSerialNumber());
        event.setIsMonitor(0);
        // NOTE(review): user fields are populated from the tenant fields, as in
        // the original implementation — confirm this is intentional.
        event.setUserId(deviceLog.getTenantId());
        event.setUserName(deviceLog.getTenantName());
        event.setTenantId(deviceLog.getTenantId());
        event.setTenantName(deviceLog.getTenantName());
        event.setCreateTime(DateUtils.getNowDate());
        event.setCreateBy(deviceLog.getCreateBy());
        // Log mode: 1=shadow mode, 2=online mode, 3=other
        event.setMode(3);
        event.setLogValue(deviceLog.getLogValue());
        event.setRemark(deviceLog.getRemark());
        event.setIdentify(deviceLog.getIdentify());
        event.setLogType(deviceLog.getLogType());
        return event;
    }

    /**
     * Writes each batch entry individually via {@link #saveDeviceLog}.
     *
     * @return total number of rows written
     */
    @Override
    public int saveBatch(TdLogDto dto) {
        int ret = 0;
        for (DeviceLog deviceLog : dto.getList()) {
            ret += this.saveDeviceLog(deviceLog);
        }
        return ret;
    }

    /** Deletes the device's logs by its device number. */
    @Override
    public int deleteDeviceLogByDeviceNumber(String deviceNumber) {
        return deviceLogMapper.deleteDeviceLogByDeviceNumber(deviceNumber);
    }

    /** Totals of property / function / event / monitor logs for a device. */
    @Override
    public DeviceStatistic selectCategoryLogCount(Device device){
        return deviceLogMapper.selectCategoryLogCount(device);
    }

    /** Monitor-data list. */
    @Override
    public List<MonitorModel> selectMonitorList(DeviceLog deviceLog) {
        return deviceLogMapper.selectMonitorList(deviceLog);
    }

    /** Device-log list. */
    @Override
    public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog) {
        return deviceLogMapper.selectDeviceLogList(deviceLog);
    }
}

View File

@@ -0,0 +1,126 @@
package com.fastbee.iot.tsdb.service.impl;
import com.baomidou.dynamic.datasource.annotation.DS;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.model.DeviceStatistic;
import com.fastbee.iot.tsdb.service.ILogService;
import com.fastbee.iot.model.MonitorModel;
import com.fastbee.iot.mapper.TDDeviceLogMapper;
import com.fastbee.iot.tsdb.model.TdLogDto;
import com.fastbee.iot.util.SnowflakeIdWorker;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Primary;
import org.springframework.stereotype.Service;
import java.util.List;

/**
 * TDengine-backed implementation of {@link ILogService}.
 * Only active when {@code spring.datasource.dynamic.datasource.taos.enabled=true};
 * all statements run against the "taos" dynamic datasource.
 * (original header: 2022/5/22, author admin)
 */
@Slf4j
@Primary
@ConditionalOnProperty(name = "spring.datasource.dynamic.datasource.taos.enabled", havingValue = "true")
@DS("taos")
@Service("Tdengine")
public class TdengineLogServiceImpl implements ILogService {

    @Autowired
    private TDDeviceLogMapper tDDeviceLogMapper;

    // Worker id 1; generates the client-side primary keys for log rows.
    private final SnowflakeIdWorker snowflakeIdWorker = new SnowflakeIdWorker(1);

    /** Target TDengine database, taken from the "taos" datasource config. */
    @Value("${spring.datasource.dynamic.datasource.taos.dbName}")
    private String dbName;

    /** Creates the device_log super table in the configured database. */
    @Override
    public int createSTable(String database) {
        return tDDeviceLogMapper.createSTable(database);
    }

    /**
     * Persists a single device log, assigning it a snowflake id first.
     *
     * @return affected row count
     */
    @Override
    public int saveDeviceLog(DeviceLog deviceLog) {
        long logId = snowflakeIdWorker.nextId();
        deviceLog.setLogId(logId);
        return tDDeviceLogMapper.save(dbName, deviceLog);
    }

    /** Batch-saves logs with a single multi-row TDengine insert. */
    @Override
    public int saveBatch(TdLogDto dto) {
        return tDDeviceLogMapper.saveBatch(dbName, dto);
    }

    /**
     * Totals of property / event / monitor logs for a device owner; null
     * counts from the mapper are normalized to 0.
     */
    @Override
    public DeviceStatistic selectCategoryLogCount(Device device) {
        DeviceStatistic statistic = new DeviceStatistic();
        Long property = tDDeviceLogMapper.selectPropertyLogCount(dbName, device);
        Long event = tDDeviceLogMapper.selectEventLogCount(dbName, device);
        Long monitor = tDDeviceLogMapper.selectMonitorLogCount(dbName, device);
        statistic.setPropertyCount(property == null ? 0 : property);
        statistic.setEventCount(event == null ? 0 : event);
        statistic.setMonitorCount(monitor == null ? 0 : monitor);
        return statistic;
    }

    /**
     * Device-log list. The identify filter is wrapped in '%' because the
     * TDengine mapper uses a plain "identify like #{identify}" predicate —
     * without the wildcards the LIKE degenerates to an exact match. This also
     * matches the behavior of the IoTDB and MySQL implementations.
     */
    @Override
    public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog) {
        if (deviceLog.getIdentify() != null) {
            deviceLog.setIdentify("%" + deviceLog.getIdentify() + "%");
        }
        return tDDeviceLogMapper.selectDeviceLogList(dbName, deviceLog);
    }

    // Disabled paged event-log query kept for reference:
    // @Override
    // public Page<DeviceLog> selectEventLogList(DeviceLog deviceLog) {
    //     if (deviceLog.getParams().get("beginTime") != null && deviceLog.getParams().get("beginTime") != "" && deviceLog.getParams().get("endTime") != null && deviceLog.getParams().get("endTime") != "") {
    //         String beginTime = deviceLog.getParams().get("beginTime").toString();
    //         String endTime = deviceLog.getParams().get("endTime").toString();
    //         beginTime = beginTime + " 00:00:00";
    //         endTime = endTime + " 23:59:59";
    //         deviceLog.setBeginTime(beginTime);
    //         deviceLog.setEndTime(endTime);
    //     }
    //     return tDDeviceLogMapper.selectEventLogList(new Page<>(deviceLog.getPageNum(), deviceLog.getPageSize()), dbName, deviceLog);
    // }

    /** Monitor-data list; identify is wrapped for a contains LIKE match. */
    @Override
    public List<MonitorModel> selectMonitorList(DeviceLog deviceLog) {
        if (deviceLog.getIdentify() != null) {
            deviceLog.setIdentify("%" + deviceLog.getIdentify() + "%");
        }
        return tDDeviceLogMapper.selectMonitorList(dbName, deviceLog);
    }

    /** Drops the device's sub-table, removing all of its logs. */
    @Override
    public int deleteDeviceLogByDeviceNumber(String deviceNumber) {
        return tDDeviceLogMapper.deleteDeviceLogByDeviceNumber(dbName, deviceNumber);
    }
}

View File

@@ -16,7 +16,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<result property="deviceId" column="device_id" />
<result property="deviceName" column="device_name" />
<result property="serialNumber" column="serial_number" />
<result property="identity" column="identify" />
<result property="identify" column="identify" />
<result property="createBy" column="create_by" />
<result property="isMonitor" column="is_monitor" />
<result property="mode" column="mode" />
@@ -31,7 +31,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<resultMap type="com.fastbee.iot.model.HistoryModel" id="HistoryResult">
<result property="value" column="log_value" />
<result property="time" column="create_time" />
<result property="identity" column="identify" />
<result property="identify" column="identify" />
</resultMap>
<sql id="selectDeviceLogVo">
@@ -42,7 +42,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
select log_value, create_time from iot_device_log
<where>
<if test="1==1"> and is_monitor=1</if>
<if test="identity != null and identity != ''"> and identify = #{identity}</if>
<if test="identify != null and identify != ''"> and identify = #{identify}</if>
<if test="deviceId != null and deviceId !=0"> and device_id = #{deviceId}</if>
<if test="serialNumber != null and serialNumber !=''"> and serial_number = #{serialNumber}</if>
<if test="beginTime != null and beginTime != '' and endTime != null and endTime != ''"> and create_time between #{beginTime} and #{endTime}</if>
@@ -94,6 +94,45 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
</select>
<insert id="insertDeviceLog" parameterType="com.fastbee.iot.domain.DeviceLog" useGeneratedKeys="true" keyProperty="logId">
insert into iot_device_log
<trim prefix="(" suffix=")" suffixOverrides=",">
<if test="logType != null">log_type,</if>
<if test="logValue != null">log_value,</if>
<if test="deviceId != null">device_id,</if>
<if test="deviceName != null and deviceName != ''">device_name,</if>
<if test="serialNumber != null and serialNumber != ''">serial_number,</if>
<if test="identify != null">identify,</if>
<if test="createBy != null">create_by,</if>
<if test="isMonitor != null">is_monitor,</if>
<if test="mode != null">mode,</if>
<if test="createTime != null">create_time,</if>
<if test="remark != null">remark,</if>
<if test="userId != null">user_id,</if>
<if test="userName != null and userName != ''">user_name,</if>
<if test="tenantId != null">tenant_id,</if>
<if test="tenantName != null and tenantName != ''">tenant_name,</if>
<if test="modelName != null and modelName != ''">model_name,</if>
</trim>
<trim prefix="values (" suffix=")" suffixOverrides=",">
<if test="logType != null">#{logType},</if>
<if test="logValue != null">#{logValue},</if>
<if test="deviceId != null">#{deviceId},</if>
<if test="deviceName != null and deviceName != ''">#{deviceName},</if>
<if test="serialNumber != null and serialNumber != ''">#{serialNumber},</if>
<if test="identify != null">#{identify},</if>
<if test="createBy != null">#{createBy},</if>
<if test="isMonitor != null">#{isMonitor},</if>
<if test="mode != null">#{mode},</if>
<if test="createTime != null">#{createTime},</if>
<if test="remark != null">#{remark},</if>
<if test="userId != null">#{userId},</if>
<if test="userName != null and userName != ''">#{userName},</if>
<if test="tenantId != null">#{tenantId},</if>
<if test="tenantName != null and tenantName != ''">#{tenantName},</if>
<if test="modelName != null and modelName != ''">#{modelName},</if>
</trim>
</insert>
<insert id="saveBatch" parameterType="com.fastbee.iot.domain.DeviceLog">
insert into iot_device_log (log_type,log_value,device_id,device_name,serial_number,identify,create_by,
@@ -101,7 +140,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
values
<foreach collection="list" item="item" index="index" separator=",">
(#{item.logType},#{item.logValue},#{item.deviceId},#{item.deviceName},#{item.serialNumber},
#{item.identity},#{item.createBy},#{item.isMonitor},#{item.mode},#{item.createTime},#{item.remark},
#{item.identify},#{item.createBy},#{item.isMonitor},#{item.mode},#{item.createTime},#{item.remark},
#{item.userId},#{item.userName},#{item.tenantId},#{item.tenantName},#{item.modelName})
</foreach>
</insert>
@@ -114,7 +153,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<if test="deviceId != null">device_id = #{deviceId},</if>
<if test="deviceName != null and deviceName != ''">device_name = #{deviceName},</if>
<if test="serialNumber != null and serialNumber != ''">serial_number = #{serialNumber},</if>
<if test="identity != null">identify = #{identity},</if>
<if test="identify != null">identify = #{identify},</if>
<if test="createBy != null">create_by = #{createBy},</if>
<if test="isMonitor != null">is_monitor = #{isMonitor},</if>
<if test="mode != null">mode = #{mode},</if>
@@ -150,7 +189,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<if test="deviceId != null and deviceId !=0"> and device_id = #{deviceId}</if>
<if test="serialNumber != null and serialNumber !=''"> and serial_number = #{serialNumber}</if>
<if test="logType != null "> and log_type = #{logType}</if>
<if test="identity != null and identity != ''"> and identity like concat('%', #{identity}, '%')</if>
<if test="identify != null and identify != ''"> and identify like concat('%', #{identify}, '%')</if>
</where>
order by create_time desc
</select>

View File

@@ -6,7 +6,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<resultMap type="com.fastbee.iot.domain.EventLog" id="EventLogResult">
<result property="logId" column="log_id" />
<result property="identity" column="identify" />
<result property="identify" column="identify" />
<result property="modelName" column="model_name" />
<result property="logType" column="log_type" />
<result property="logValue" column="log_value" />
@@ -31,7 +31,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<select id="selectEventLogList" parameterType="com.fastbee.iot.domain.EventLog" resultMap="EventLogResult">
<include refid="selectEventLogVo"/>
<where>
<if test="identity != null and identity != ''"> and identify = #{identity}</if>
<if test="identify != null and identify != ''"> and identify = #{identify}</if>
<if test="modelName != null and modelName != ''"> and model_name like concat('%', #{modelName}, '%')</if>
<if test="logType != null "> and log_type = #{logType}</if>
<if test="logValue != null and logValue != ''"> and log_value = #{logValue}</if>
@@ -62,7 +62,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<insert id="insertEventLog" parameterType="com.fastbee.iot.domain.EventLog" useGeneratedKeys="true" keyProperty="logId">
insert into iot_event_log
<trim prefix="(" suffix=")" suffixOverrides=",">
<if test="identity != null and identity != ''">identify,</if>
<if test="identify != null and identify != ''">identify,</if>
<if test="modelName != null">model_name,</if>
<if test="logType != null">log_type,</if>
<if test="logValue != null and logValue != ''">log_value,</if>
@@ -80,7 +80,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<if test="remark != null">remark,</if>
</trim>
<trim prefix="values (" suffix=")" suffixOverrides=",">
<if test="identity != null and identity != ''">#{identity},</if>
<if test="identify != null and identify != ''">#{identify},</if>
<if test="modelName != null">#{modelName},</if>
<if test="logType != null">#{logType},</if>
<if test="logValue != null and logValue != ''">#{logValue},</if>
@@ -104,7 +104,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
user_name,tenant_id,tenant_name,create_by,create_time,remark)
values
<foreach collection="list" separator="," index="index" item="item">
(#{item.identity},#{item.modelName},#{item.logType},#{item.logValue},#{item.deviceId},#{item.deviceName},#{item.serialNumber},#{item.isMonitor},
(#{item.identify},#{item.modelName},#{item.logType},#{item.logValue},#{item.deviceId},#{item.deviceName},#{item.serialNumber},#{item.isMonitor},
#{item.mode},#{item.userId},#{item.userName},#{item.tenantId},#{item.tenantName},#{item.createBy},#{item.createTime},#{item.remark})
</foreach>
</insert>
@@ -112,7 +112,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<update id="updateEventLog" parameterType="com.fastbee.iot.domain.EventLog">
update iot_event_log
<trim prefix="SET" suffixOverrides=",">
<if test="identity != null and identity != ''">identify = #{identity},</if>
<if test="identify != null and identify != ''">identify = #{identify},</if>
<if test="modelName != null">model_name = #{modelName},</if>
<if test="logType != null">log_type = #{logType},</if>
<if test="logValue != null and logValue != ''">log_value = #{logValue},</if>

View File

@@ -0,0 +1,182 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper
PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- Mapper for device logs stored in Apache IoTDB under the root.ln.device_log device path. -->
<mapper namespace="com.fastbee.iot.mapper.IotDbLogMapper">
<!-- IoTDB result columns are fully-qualified time-series paths, hence the long column names. -->
<resultMap type="com.fastbee.iot.model.MonitorModel" id="MonitorResult">
<result property="value" column="root.ln.device_log.log_value" />
<result property="time" column="Time" />
</resultMap>
<resultMap type="com.fastbee.iot.domain.DeviceLog" id="DeviceLogResult">
<result property="createTime" column="Time" />
<result property="logType" column="root.ln.device_log.log_type" />
<result property="logValue" column="root.ln.device_log.log_value" />
<result property="deviceId" column="root.ln.device_log.device_id" />
<result property="deviceName" column="root.ln.device_log.device_name" />
<result property="serialNumber" column="root.ln.device_log.serial_number" />
<result property="identify" column="root.ln.device_log.identify" />
<result property="createBy" column="root.ln.device_log.create_by" />
<result property="isMonitor" column="root.ln.device_log.is_monitor" />
<result property="mode" column="root.ln.device_log.mode" />
<result property="remark" column="root.ln.device_log.remark" />
<result property="tenantId" column="root.ln.device_log.tenant_id" />
</resultMap>
<resultMap type="com.fastbee.iot.model.HistoryModel" id="HistoryResult">
<result property="time" column="Time" />
<result property="value" column="root.ln.device_log.log_value" />
<result property="identify" column="root.ln.device_log.identify" />
</resultMap>
<!-- <resultMap type="com.fastbee.iot.model.HistoryBo" id="HistoryResultBo">-->
<!-- <result property="value" column="root.ln.device_log.log_value" />-->
<!-- <result property="time" column="Time" />-->
<!-- <result property="identify" column="root.ln.device_log.identify" />-->
<!-- </resultMap>-->
<!-- <resultMap type="com.fastbee.iot.model.vo.ThingsModelLogCountVO" id="ThingsModelLogCountVO">-->
<!-- <result property="identifier" column="root.ln.device_log.identify" />-->
<!-- </resultMap>-->
<!-- ${database} is string-interpolated, not bound; callers must pass a trusted,
     internally generated database name, never user input. -->
<update id="createDB">
create database ${database}
</update>
<!-- Returns the number of databases matching the given name (0 or 1). -->
<select id="countDB" resultType="Long">
count databases ${database}
</select>
<!-- Inserts one log row into the root.ln.device_log device; optional fields are
     only written when present so column and value lists stay aligned. -->
<insert id="save" parameterType="com.fastbee.iot.domain.DeviceLog" useGeneratedKeys="false">
INSERT INTO root.ln.device_log (
<trim suffixOverrides=",">
<if test="logType != null">log_type</if>
<if test="logValue != null">, log_value</if>
<if test="deviceId != null">, device_id</if>
<if test="deviceName != null and deviceName != ''">, device_name</if>
<if test="serialNumber != null and serialNumber != ''">,serial_number</if>
<if test="identify != null">, identify</if>
<if test="createBy != null">, create_by</if>
<if test="isMonitor != null">, is_monitor</if>
<if test="mode != null">, mode</if>
<if test="remark != null">, remark</if>
<if test="tenantId != null">, tenant_id</if>
</trim>
) VALUES (
<trim suffixOverrides=",">
<if test="logType != null">#{logType}</if>
<if test="logValue != null">, #{logValue}</if>
<if test="deviceId != null">, #{deviceId}</if>
<if test="deviceName != null and deviceName != ''">, #{deviceName}</if>
<if test="serialNumber != null and serialNumber != ''">,#{serialNumber}</if>
<if test="identify != null">, #{identify}</if>
<if test="createBy != null">, #{createBy}</if>
<if test="isMonitor != null">, #{isMonitor}</if>
<if test="mode != null">, #{mode}</if>
<if test="remark != null">, #{remark}</if>
<if test="tenantId != null">, #{tenantId}</if>
</trim>
)
</insert>
<!-- NOTE(review): the String parameter (device number) is never used — this
     statement deletes EVERY series under root.ln.device_log.**, i.e. the logs
     of ALL devices, not just the one passed in. Confirm this is intended. -->
<delete id="deleteDeviceLogByDeviceNumber" parameterType="String">
DELETE FROM root.ln.device_log.**
</delete>
<!-- Count of property logs (log_type = 1), optionally scoped by tenant/creator. -->
<select id="selectPropertyLogCount" parameterType="com.fastbee.iot.domain.Device" resultType="Long">
SELECT COUNT(mode)
FROM root.ln.device_log
WHERE log_type = 1
<if test="device.tenantId != null">AND tenant_id = #{device.tenantId}</if>
<if test="device.createBy != null and device.createBy != ''"> AND create_by = #{device.createBy}</if>
</select>
<!-- Count of function logs (log_type = 2). -->
<select id="selectFunctionLogCount" parameterType="com.fastbee.iot.domain.Device" resultType="Long">
select count(mode)
from root.ln.device_log
where log_type=2
<if test="device.tenantId != null"> and tenant_id = #{device.tenantId}</if>
</select>
<!-- Count of event logs (log_type = 3). -->
<select id="selectEventLogCount" parameterType="com.fastbee.iot.domain.Device" resultType="Long">
select count(mode)
from root.ln.device_log
where log_type=3
<if test="device.tenantId != null"> and tenant_id = #{device.tenantId}</if>
<if test="device.createBy != null and device.createBy != ''"> AND create_by = #{device.createBy}</if>
</select>
<!-- Count of monitor logs (property logs flagged is_monitor = 1). -->
<select id="selectMonitorLogCount" parameterType="com.fastbee.iot.domain.Device" resultType="Long">
select count(mode)
from root.ln.device_log
where log_type=1 and is_monitor=1
<if test="device.tenantId != null"> and tenant_id = #{device.tenantId}</if>
<if test="device.createBy != null and device.createBy != ''"> AND create_by = #{device.createBy}</if>
</select>
<!-- NOTE(review): serialNumber/identify/beginTime/endTime are spliced in with
     ${} (string interpolation), not bound with #{} — SQL-injection risk if any
     of these can carry user input; verify upstream sanitization or switch to
     bound parameters if the IoTDB driver supports them. -->
<select id="selectMonitorList" parameterType="com.fastbee.iot.domain.DeviceLog" resultMap="MonitorResult">
SELECT log_value FROM root.ln.device_log
<where>
is_monitor = 1
<if test="device.serialNumber != null and device.serialNumber != ''">
AND serial_number = '${device.serialNumber}'
</if>
<if test="device.identify != null and device.identify != ''">
AND identify LIKE '${device.identify}'
</if>
<if test="device.beginTime != null and device.endTime != null">
AND time <![CDATA[ >= ]]> ${device.beginTime} and time <![CDATA[ <= ]]> ${device.endTime}
</if>
</where>
ORDER BY time DESC
LIMIT #{device.total}
</select>
<!-- Device-log list; callers pre-wrap identify in '%' for the LIKE match.
     NOTE(review): OFFSET is fed the page *number* (#{page.pageNum}), not the
     row offset (pageNum-1)*pageSize — pages after the first look as if they
     skip the wrong number of rows; confirm against the caller. -->
<select id="selectDeviceLogList" parameterType="com.fastbee.iot.domain.DeviceLog" resultMap="DeviceLogResult">
SELECT log_type, log_value, device_id, device_name, identify, create_by, is_monitor, mode, tenant_id, remark, model_name
FROM root.ln.device_log
<where>
<if test="device.serialNumber != null and device.serialNumber != ''">
and serial_number = #{device.serialNumber}
</if>
<if test="device.isMonitor != null">
AND is_monitor = #{device.isMonitor}
</if>
<if test="device.logType != null">
AND log_type = #{device.logType}
</if>
<if test="device.logType == null">
AND log_type != 7
</if>
<if test="device.identify != null and device.identify != ''">
AND identify LIKE '${device.identify}'
</if>
</where>
ORDER BY time DESC limit #{page.pageSize} offset #{page.pageNum}
</select>
<!-- Event-style logs: when no explicit log_type is given, excludes property(1),
     function(2), type 4 and type 7 rows. Same ${} interpolation caveat as above. -->
<select id="selectEventLogList" parameterType="com.fastbee.iot.domain.DeviceLog" resultMap="DeviceLogResult">
SELECT log_type, log_value, device_id, device_name, serial_number, identify, create_by, is_monitor, mode, remark, tenant_id FROM root.ln.device_log
<where>
<if test="device.serialNumber != null and device.serialNumber != ''">
and serial_number = '${device.serialNumber}'
</if>
<if test="device.isMonitor != null"> and is_monitor = #{device.isMonitor}</if>
<if test="device.logType != null "> and log_type = #{device.logType}</if>
<if test="device.logType == null "> and log_type != 1
and log_type != 2
and log_type != 4
and log_type != 7
</if>
<if test="device.identify != null and device.identify != ''"> and identify like '${device.identify}' </if>
<if test="device.beginTime != null and device.beginTime != '' and device.endTime != null and device.endTime != ''">
and time <![CDATA[ >= ]]> ${device.beginTime} and time <![CDATA[ <= ]]> ${device.endTime}
</if>
</where>
order by time desc
</select>
</mapper>

View File

@@ -0,0 +1,176 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper
PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- Mapper for device logs stored in TDengine: one sub-table per device
     (device_<serialNumber>) under the device_log super table.
     ${database} is string-interpolated — it must always come from trusted
     application configuration, never from user input. -->
<mapper namespace="com.fastbee.iot.mapper.TDDeviceLogMapper">
    <resultMap type="com.fastbee.iot.model.MonitorModel" id="MonitorResult">
        <result property="value" column="log_value" />
        <result property="time" column="ts" />
    </resultMap>
    <resultMap type="com.fastbee.iot.domain.DeviceLog" id="DeviceLogResult">
        <result property="logType" column="log_type" />
        <result property="logValue" column="log_value" />
        <result property="mode" column="mode" />
        <result property="deviceId" column="device_id" />
        <result property="deviceName" column="device_name" />
        <result property="serialNumber" column="serial_number" />
        <result property="identify" column="identify" />
        <result property="createBy" column="create_by" />
        <result property="isMonitor" column="is_monitor" />
        <result property="createTime" column="ts" />
        <result property="userId" column="user_id" />
        <result property="userName" column="user_name" />
        <result property="tenantId" column="tenant_id" />
        <result property="tenantName" column="tenant_name" />
        <result property="remark" column="remark" />
    </resultMap>
    <!-- <resultMap type="com.fastbee.iot.model.HistoryModel" id="HistoryResult">-->
    <!-- <result property="time" column="ts" />-->
    <!-- <result property="value" column="log_value" />-->
    <!-- <result property="identify" column="identify" />-->
    <!-- <result property="moderName" column="mode" />-->
    <!-- </resultMap>-->
    <!-- <resultMap type="com.fastbee.iot.model.HistoryBo" id="HistoryResultBo">-->
    <!-- <result property="value" column="log_value" />-->
    <!-- <result property="time" column="ts" />-->
    <!-- <result property="identify" column="identify" />-->
    <!-- </resultMap>-->
    <update id="createDB">
        create database if not exists ${database} vgroups 4;
    </update>
    <!-- Super table holding one sub-table per device, tagged by serial_number. -->
    <update id="createSTable">
        create STABLE if not exists ${database}.device_log
        (ts timestamp,
        log_value BINARY(100),
        is_monitor TINYINT,
        log_type TINYINT,
        identify BINARY(100),
        mode TINYINT,
        remark BINARY(500),
        tenant_id BIGINT,
        create_by BINARY(50))
        TAGS(serial_number BINARY(50));
    </update>
    <!-- Single insert with auto sub-table creation. The super table is
         referenced as ${database}.device_log so the statement works even when
         the connection's current database differs from ${database}. -->
    <insert id="save" parameterType="com.fastbee.iot.domain.DeviceLog" useGeneratedKeys="false">
        insert into ${database}.device_${device.serialNumber} using ${database}.device_log
        tags (#{device.serialNumber})
        values (
        <if test="device.ts != null">
            #{device.ts},
        </if>
        <if test="device.ts == null">
            now,
        </if>
        #{device.logValue},
        #{device.isMonitor},
        #{device.logType},
        #{device.identify},
        #{device.mode},
        #{device.remark},
        #{device.tenantId},
        #{device.createBy});
    </insert>
    <!-- Multi-row insert: TDengine accepts space-separated value tuples, hence
         separator=" ". "now" may collide for rows in the same batch if the
         timestamp resolution is too coarse — rows with identical ts overwrite
         each other. -->
    <insert id="saveBatch" parameterType="com.fastbee.iot.tsdb.model.TdLogDto" useGeneratedKeys="false">
        insert into ${database}.device_${data.serialNumber} using ${database}.device_log
        tags (#{data.serialNumber})
        values
        <foreach collection="data.list" separator=" " item="device" index="index">
            (now,
            #{device.logValue},
            #{device.isMonitor},
            #{device.logType},
            #{device.identify},
            #{device.mode},
            #{device.remark},
            #{device.tenantId},
            #{device.createBy})
        </foreach>
    </insert>
    <!-- Drops the device's sub-table, removing all of its rows at once. -->
    <delete id="deleteDeviceLogByDeviceNumber" parameterType="com.fastbee.iot.domain.DeviceLog">
        DROP TABLE IF EXISTS ${database}.device_${serialNumber};
    </delete>
    <!-- Count of property logs (log_type = 1), optionally scoped by tenant/creator. -->
    <select id="selectPropertyLogCount" parameterType="com.fastbee.iot.domain.Device" resultType="Long">
        select count(mode) as propertyCount
        from ${database}.device_log
        where log_type=1
        <if test="device.tenantId != null"> and tenant_id = #{device.tenantId}</if>
        <if test="device.createBy != null and device.createBy != ''"> AND create_by = #{device.createBy}</if>
    </select>
    <!-- Count of function logs (log_type = 2). -->
    <select id="selectFunctionLogCount" parameterType="com.fastbee.iot.domain.Device" resultType="Long">
        select count(mode) as functionCount
        from ${database}.device_log
        where log_type=2
        <if test="device.tenantId != null"> and tenant_id = #{device.tenantId}</if>
    </select>
    <!-- Count of event logs (log_type = 3). -->
    <select id="selectEventLogCount" parameterType="com.fastbee.iot.domain.Device" resultType="Long">
        select count(mode) as eventCount
        from ${database}.device_log
        where log_type=3
        <if test="device.tenantId != null"> and tenant_id = #{device.tenantId}</if>
        <if test="device.createBy != null and device.createBy != ''"> AND create_by = #{device.createBy}</if>
    </select>
    <!-- Count of monitor logs (property logs flagged is_monitor = 1). -->
    <select id="selectMonitorLogCount" parameterType="com.fastbee.iot.domain.Device" resultType="Long">
        select count(mode) as monitorCount
        from ${database}.device_log
        where log_type=1 and is_monitor=1
        <if test="device.tenantId != null"> and tenant_id = #{device.tenantId}</if>
        <if test="device.createBy != null and device.createBy != ''"> AND create_by = #{device.createBy}</if>
    </select>
    <!-- Monitor-data list; callers pre-wrap identify in '%' for the LIKE match.
         ORDER BY / LIMIT now sit outside <where>, where they belong (the old
         placement only worked because the is_monitor predicate is unconditional).
         NOTE(review): LIMIT uses #{device.total} — confirm callers always set it. -->
    <select id="selectMonitorList" parameterType="com.fastbee.iot.domain.DeviceLog" resultMap="MonitorResult">
        select log_value, ts from ${database}.device_log
        <where>
            is_monitor=1
            <if test="device.serialNumber != null and device.serialNumber !=''"> and serial_number = #{device.serialNumber}</if>
            <if test="device.identify != null and device.identify != ''"> and identify like #{device.identify}</if>
            <if test="device.beginTime != null and device.beginTime != '' and device.endTime != null and device.endTime != ''"> and ts between #{device.beginTime} and #{device.endTime}</if>
        </where>
        order by ts desc
        limit #{device.total}
    </select>
    <!-- Device-log list; with no explicit log_type, type 7 rows are excluded. -->
    <select id="selectDeviceLogList" parameterType="com.fastbee.iot.domain.DeviceLog" resultMap="DeviceLogResult">
        select * from ${database}.device_log
        <where>
            <if test="device.isMonitor != null"> and is_monitor = #{device.isMonitor}</if>
            <if test="device.serialNumber != null and device.serialNumber !=''"> and serial_number = #{device.serialNumber}</if>
            <if test="device.logType != null "> and log_type = #{device.logType}</if>
            <if test="device.logType == null "> and log_type != 7</if>
            <if test="device.identify != null and device.identify != ''"> and identify like #{device.identify}</if>
        </where>
        order by ts desc
    </select>
    <!-- Event-style logs: when no explicit log_type is given, excludes
         property(1), function(2), type 4 and type 7 rows. -->
    <select id="selectEventLogList" parameterType="com.fastbee.iot.domain.DeviceLog" resultMap="DeviceLogResult">
        select * from ${database}.device_log
        <where>
            <if test="device.isMonitor != null"> and is_monitor = #{device.isMonitor}</if>
            <if test="device.logType != null "> and log_type = #{device.logType}</if>
            <if test="device.logType == null "> and log_type != 1
                and log_type != 2
                and log_type != 4
                and log_type != 7 </if>
            <if test="device.serialNumber != null and device.serialNumber !=''"> and serial_number = #{device.serialNumber}</if>
            <if test="device.identify != null and device.identify != ''"> and identify like #{device.identify}</if>
            <if test="device.beginTime != null and device.beginTime != '' and device.endTime != null and device.endTime != ''">
                and ts between #{device.beginTime} and #{device.endTime}
            </if>
        </where>
        order by ts desc
    </select>
</mapper>

View File

@@ -170,7 +170,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<if test="status != null and status != ''">#{status},</if>
<if test="createBy != null and createBy != ''">#{createBy},</if>
<if test="remark != null and remark != ''">#{remark},</if>
sysdate()
current_timestamp
)
</insert>
@@ -189,7 +189,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
<if test="loginDate != null">login_date = #{loginDate},</if>
<if test="updateBy != null and updateBy != ''">update_by = #{updateBy},</if>
<if test="remark != null">remark = #{remark},</if>
update_time = sysdate()
update_time = current_timestamp
</set>
where user_id = #{userId}
</update>

View File

@@ -42,7 +42,8 @@
<mybatis-plus.version>3.5.3.1</mybatis-plus.version>
<mybatis-plus-generator.version>3.5.3.1</mybatis-plus-generator.version>
<dynamic-datasource.version>4.3.1</dynamic-datasource.version>
<tdengine.version>2.0.38</tdengine.version>
<tdengine.version>3.4.0</tdengine.version>
<iotdb.version>1.3.3</iotdb.version>
<guava.version>32.0.1-jre</guava.version>
<lock4j.version>2.2.3</lock4j.version>
<easyexcel.version>3.3.1</easyexcel.version>

View File

@@ -0,0 +1,64 @@
-- IoTDB command cheat-sheet for the device_log schema under root.ln.
-- NOTE(review): the next three statements are alternative one-off admin
-- commands (create / delete / count the database), not a sequence — run
-- only the one you need.
create database root.ln
DELETE database root.ln
count databases root.ln
//
-- Time-series (column) definitions for device_log: numeric columns use RLE
-- encoding, text columns use PLAIN.
CREATE timeseries root.ln.device_log.log_type WITH DATATYPE=INT32, ENCODING=RLE;
CREATE timeseries root.ln.device_log.log_value WITH DATATYPE=TEXT, ENCODING=PLAIN;
CREATE timeseries root.ln.device_log.device_id WITH DATATYPE=INT64, ENCODING=RLE;
CREATE timeseries root.ln.device_log.device_name WITH DATATYPE=TEXT, ENCODING=PLAIN;
CREATE timeseries root.ln.device_log.serial_number WITH DATATYPE=TEXT, ENCODING=PLAIN;
CREATE timeseries root.ln.device_log.identify WITH DATATYPE=TEXT, ENCODING=PLAIN;
CREATE timeseries root.ln.device_log.create_by WITH DATATYPE=TEXT, ENCODING=PLAIN;
CREATE timeseries root.ln.device_log.is_monitor WITH DATATYPE=INT32, ENCODING=RLE;
CREATE timeseries root.ln.device_log.mode WITH DATATYPE=INT32, ENCODING=RLE;
CREATE timeseries root.ln.device_log.tenant_id WITH DATATYPE=INT32, ENCODING=RLE;
CREATE timeseries root.ln.device_log.remark WITH DATATYPE=TEXT, ENCODING=PLAIN;
-- // delete measurement points (timeseries)
-- delete timeseries root.ln.device_log.**
-- drop timeseries root.ln.device_log.**
--
-- // set / unset data TTL on the database
-- set ttl to root.ln 360000
-- unset ttl from root.ln
--
-- // create device templates and attach them to paths
-- create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY)
-- create device template t2 aligned (lat FLOAT encoding=Gorilla, lon FLOAT encoding=Gorilla)
-- set device template t1 to root.device_log.sn1
-- set device template t2 to root.device_log.sn2
-- create timeseries using device template on root.device_log.sn1
-- create timeseries using device template on root.device_log.sn2
-- // detach a device template from a path
-- delete timeseries of device template t1 from root.device_log.sn1
-- deactivate device template t1 from root.device_log.sn1
-- unset device template t1 from root.device_log.sn1
-- // drop a device template
-- drop device template t1
-- // inspect device templates
-- show device templates
-- show nodes in device template t1
-- show nodes in device template t2
-- show paths set device template t1
-- show paths using device template t1
-- // insert sample data
-- insert into root.device_log.sn3(tenant_id,device_id,device_name,log_type,log_value,identify,is_monitor,mode,model_name,remark,create_by)
-- values(1,1,'设备1',1,'100','1',1,1,'设备1','备注','admin')
--
-- insert into root.device_log.sn4(tenant_id,device_id,device_name,log_type,log_value,identify,is_monitor,mode,model_name,remark,create_by)
-- values(1,1,'设备2',1,'100','1',1,1,'设备2','备注','admin');
-- insert into root.device_log.sn4(tenant_id,device_id,device_name,log_type,log_value,identify,is_monitor,mode,model_name,remark,create_by)
-- values(1,1,'设备2',1,'101','1',1,1,'设备2','备注','admin')
-- // query the inserted data
-- select * from root.device_log.sn3
-- select * from root.device_log.sn4
-- select * from root.device_log.D1ELV3A5TOJS