diff --git a/docker/data/databasets.yml b/docker/data/databasets.yml new file mode 100644 index 00000000..9cb24eba --- /dev/null +++ b/docker/data/databasets.yml @@ -0,0 +1,70 @@ +version: '2' + +networks: + network: + driver: bridge + ipam: + driver: default + config: + - subnet: 177.7.0.0/16 + +services: + tdengine: + image: 'tdengine/tdengine:3.3.5.8' + container_name: tdengine + privileged: true + hostname: fastbee + ports: + - 6030:6030 + - 6041:6041 + - 6043-6049:6043-6049 + - 6043-6049:6043-6049/udp + volumes: + - /var/data/tdengine/log:/var/log/taos + - /var/data/tdengine/data:/var/lib/taos + - /var/data/tdengine/conf:/etc/taos + - /etc/localtime:/etc/localtime + environment: + TZ: Asia/Shanghai + networks: + network: + ipv4_address: 177.7.0.30 + + influxdb: + image: influxdb:2.7.5 + container_name: influxdb + ports: + - 8086:8086 + volumes: + - ./influxdb2:/var/lib/influxdb2 + environment: + DOCKER_INFLUXDB_INIT_MODE: "setup" + DOCKER_INFLUXDB_INIT_USERNAME: "admin" + DOCKER_INFLUXDB_INIT_PASSWORD: "admin123" + DOCKER_INFLUXDB_INIT_ORG: "fastbee" + DOCKER_INFLUXDB_INIT_BUCKET: "device_log" + DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: "inX0k-IPfSgKg6AIfoZm6Mv0DQyQOKCkfvs5ZF3a836Yzx2Ew9QgxsHev40_2gztuMn6tofwyS6nfbT4cD-SeA==" + networks: + network: + ipv4_address: 177.7.0.31 + + iotdb: + image: apache/iotdb:1.3.3-standalone + hostname: iotdb + container_name: iotdb + restart: always + ports: + - 6667:6667 + - 5555:5555 + - 8070:8070 + - 9003:9003 + privileged: true + volumes: + - /var/data/iotdb/data:/iotdb/data + - /var/data/iotdb/logs:/iotdb/logs + - /var/data/iotdb/conf:/iotdb/conf + environment: + TZ: Asia/Shanghai + networks: + network: + ipv4_address: 177.7.0.32 diff --git a/docker/data/docker-compose.yml b/docker/data/docker-compose.yml index d9b98822..5b9d659d 100644 --- a/docker/data/docker-compose.yml +++ b/docker/data/docker-compose.yml @@ -63,6 +63,7 @@ services: - redis - mysql - zlmedia + - tdengine volumes: - /var/data/java/fastbee-admin.jar:/server.jar - /var/data/java/uploadPath:/uploadPath @@ -117,5 +118,25 @@ services: network: ipv4_address: 177.7.0.15 +# tdengine: +# image: 'tdengine/tdengine:3.3.5.8' +# container_name: tdengine +# privileged: true +# hostname: fastbee +# ports: +# - 6030:6030 +# - 6041:6041 +# - 6043-6049:6043-6049 +# - 6043-6049:6043-6049/udp +# volumes: +# - /var/data/tdengine/log:/var/log/taos +# - /var/data/tdengine/data:/var/lib/taos +# - /var/data/tdengine/conf:/etc/taos +# - /etc/localtime:/etc/localtime +# environment: +# TZ: Asia/Shanghai +# networks: +# network: +# ipv4_address: 177.7.0.16 diff --git a/docker/data/iotdb/conf/confignode-env.bat b/docker/data/iotdb/conf/confignode-env.bat new file mode 100644 index 00000000..d270c08f --- /dev/null +++ b/docker/data/iotdb/conf/confignode-env.bat @@ -0,0 +1,156 @@ +@REM +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. 
See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM
+
+@echo off
+
+@REM You can set ConfigNode memory size, example '2G' or '2048M'
+set MEMORY_SIZE=
+
+@REM true or false
+@REM DO NOT FORGET TO MODIFY THE PASSWORD FOR SECURITY (%CONFIGNODE_CONF%\jmx.password and %CONFIGNODE_CONF%\jmx.access)
+set JMX_LOCAL="true"
+set JMX_PORT="32000"
+@REM only take effect when the jmx_local=false
+@REM You need to change this IP as a public IP if you want to remotely connect IoTDB ConfigNode by JMX.
+@REM 0.0.0.0 is not allowed
+set JMX_IP="127.0.0.1"
+
+if %JMX_LOCAL% == "false" (
+ echo "setting remote JMX..."
+ @REM you may have no permission to run chmod. If so, contact your system administrator.
+ set CONFIGNODE_JMX_OPTS=-Dcom.sun.management.jmxremote^
+ -Dcom.sun.management.jmxremote.port=%JMX_PORT%^
+ -Dcom.sun.management.jmxremote.rmi.port=%JMX_PORT%^
+ -Djava.rmi.server.randomIDs=true^
+ -Dcom.sun.management.jmxremote.ssl=false^
+ -Dcom.sun.management.jmxremote.authenticate=false^
+ -Dcom.sun.management.jmxremote.password.file="%CONFIGNODE_CONF%\jmx.password"^
+ -Dcom.sun.management.jmxremote.access.file="%CONFIGNODE_CONF%\jmx.access"^
+ -Djava.rmi.server.hostname=%JMX_IP%
+) else (
+ echo "setting local JMX..."
+)
+
+set CONFIGNODE_JMX_OPTS=%CONFIGNODE_JMX_OPTS% -Diotdb.jmx.local=%JMX_LOCAL%
+
+for /f %%b in ('wmic cpu get numberofcores ^| findstr "[0-9]"') do (
+ set system_cpu_cores=%%b
+)
+
+if %system_cpu_cores% LSS 1 set system_cpu_cores=1
+
+for /f %%b in ('wmic ComputerSystem get TotalPhysicalMemory ^| findstr "[0-9]"') do (
+ set system_memory=%%b
+)
+
+echo wsh.echo FormatNumber(cdbl(%system_memory%)/(1024*1024), 0) > "%CONFIGNODE_HOME%\sbin\tmp.vbs"
+for /f "tokens=*" %%a in ('cscript //nologo "%CONFIGNODE_HOME%\sbin\tmp.vbs"') do set system_memory_in_mb=%%a
+del "%CONFIGNODE_HOME%\sbin\tmp.vbs"
+set system_memory_in_mb=%system_memory_in_mb:,=%
+
+@REM suggest using memory, system memory 3 / 10
+set /a suggest_=%system_memory_in_mb%/10*3
+
+if "%MEMORY_SIZE%"=="" (
+ set /a memory_size_in_mb=%suggest_%
+) else (
+ if "%MEMORY_SIZE:~-1%"=="M" (
+ set /a memory_size_in_mb=%MEMORY_SIZE:~0,-1%
+ ) else if "%MEMORY_SIZE:~-1%"=="G" (
+ set /a memory_size_in_mb=%MEMORY_SIZE:~0,-1%*1024
+ ) else (
+ echo "Invalid format of MEMORY_SIZE, please use the format like 2048M or 2G."
+ exit /b 1 + ) +) + +@REM set on heap memory size +@REM when memory_size_in_mb is less than 4 * 1024, we will set on heap memory size to memory_size_in_mb / 4 * 3 +@REM when memory_size_in_mb is greater than 4 * 1024 and less than 16 * 1024, we will set on heap memory size to memory_size_in_mb / 5 * 4 +@REM when memory_size_in_mb is greater than 16 * 1024 and less than 128 * 1024, we will set on heap memory size to memory_size_in_mb / 8 * 7 +@REM when memory_size_in_mb is greater than 128 * 1024, we will set on heap memory size to memory_size_in_mb - 16 * 1024 +if %memory_size_in_mb% LSS 4096 ( + set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/4*3 +) else if %memory_size_in_mb% LSS 16384 ( + set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/5*4 +) else if %memory_size_in_mb% LSS 131072 ( + set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/8*7 +) else ( + set /a on_heap_memory_size_in_mb=%memory_size_in_mb%-16384 +) +set /a off_heap_memory_size_in_mb=%memory_size_in_mb%-%on_heap_memory_size_in_mb% + +set ON_HEAP_MEMORY=%on_heap_memory_size_in_mb%M +set OFF_HEAP_MEMORY=%off_heap_memory_size_in_mb%M + +set IOTDB_ALLOW_HEAP_DUMP="true" + +@REM on heap memory size +@REM set ON_HEAP_MEMORY=2G +@REM off heap memory size +@REM set OFF_HEAP_MEMORY=512M + +if "%OFF_HEAP_MEMORY:~-1%"=="M" ( + set /a off_heap_memory_size_in_mb=%OFF_HEAP_MEMORY:~0,-1% + ) else if "%OFF_HEAP_MEMORY:~-1%"=="G" ( + set /a off_heap_memory_size_in_mb=%OFF_HEAP_MEMORY:~0,-1%*1024 + ) + +@REM threads number of io +set IO_THREADS_NUMBER=100 +@REM Max cached buffer size, Note: unit can only be B! +@REM which equals OFF_HEAP_MEMORY / IO_THREADS_NUMBER +set /a MAX_CACHED_BUFFER_SIZE=%off_heap_memory_size_in_mb%/%IO_THREADS_NUMBER%*1024*1024 + +set CONFIGNODE_HEAP_OPTS=-Xmx%ON_HEAP_MEMORY% -Xms%ON_HEAP_MEMORY% +set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -XX:MaxDirectMemorySize=%OFF_HEAP_MEMORY% +set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -Djdk.nio.maxCachedBufferSize=%MAX_CACHED_BUFFER_SIZE% +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+CrashOnOutOfMemoryError + +@REM if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance +@REM IOTDB_JMX_OPTS=%IOTDB_HEAP_OPTS% -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=\tmp\confignode_heapdump.hprof + +@REM You can put your env variable here +@REM set JAVA_HOME=%JAVA_HOME% + +@REM set gc log. 
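+@REM Usage sketch (assumption: the stock sbin launcher forwards its first
+@REM argument to this script, e.g. "sbin\start-confignode.bat printgc"):
+@REM passing "printgc" turns on the rotated GC log configured below, i.e. up
+@REM to 10 files of 10M each at %CONFIGNODE_HOME%\logs\gc.log.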
+IF "%1" equ "printgc" ( + IF "%JAVA_VERSION%" == "8" ( + md "%CONFIGNODE_HOME%\logs" + set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -Xloggc:"%CONFIGNODE_HOME%\logs\gc.log" -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M + ) ELSE ( + md "%CONFIGNODE_HOME%\logs" + set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -Xlog:gc=info,heap*=trace,age*=debug,safepoint=info,promotion*=trace:file="%CONFIGNODE_HOME%\logs\gc.log":time,uptime,pid,tid,level:filecount=10,filesize=10485760 + ) +) + +@REM Add args for Java 11 and above, due to [JEP 396: Strongly Encapsulate JDK Internals by Default] (https://openjdk.java.net/jeps/396) +IF "%JAVA_VERSION%" == "8" ( + set ILLEGAL_ACCESS_PARAMS= +) ELSE ( + set ILLEGAL_ACCESS_PARAMS=--add-opens=java.base/java.util.concurrent=ALL-UNNAMED^ + --add-opens=java.base/java.lang=ALL-UNNAMED^ + --add-opens=java.base/java.util=ALL-UNNAMED^ + --add-opens=java.base/java.nio=ALL-UNNAMED^ + --add-opens=java.base/java.io=ALL-UNNAMED^ + --add-opens=java.base/java.net=ALL-UNNAMED +) + +echo ConfigNode on heap memory size = %ON_HEAP_MEMORY%B, off heap memory size = %OFF_HEAP_MEMORY%B +echo If you want to change this configuration, please check conf/confignode-env.bat. diff --git a/docker/data/iotdb/conf/confignode-env.sh b/docker/data/iotdb/conf/confignode-env.sh new file mode 100644 index 00000000..202fc759 --- /dev/null +++ b/docker/data/iotdb/conf/confignode-env.sh @@ -0,0 +1,314 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# You can set ConfigNode memory size, example '2G' or '2048M' +MEMORY_SIZE= + +# You can put your env variable here +# export JAVA_HOME=$JAVA_HOME + +# Set max number of open files +max_num=$(ulimit -n) +if [ $max_num -le 65535 ]; then + ulimit -n 65535 + if [ $? -ne 0 ]; then + echo "Warning: Failed to set max number of files to be 65535, maybe you need to use 'sudo ulimit -n 65535' to set it when you use iotdb ConfigNode in production environments." + fi +fi + +# Set somaxconn to a better value to avoid meaningless connection reset issues when the system is under high load. +# The original somaxconn will be set back when the system reboots. +# For more detail, see: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=19f92a030ca6d772ab44b22ee6a01378a8cb32d4 +SOMAXCONN=65535 +case "$(uname)" in + Linux) + somaxconn=$(sysctl -n net.core.somaxconn) + if [ "$somaxconn" -lt $SOMAXCONN ]; then + echo "WARN:" + echo "WARN: the value of net.core.somaxconn (=$somaxconn) is too small, please set it to a larger value using the following command." 
+ echo "WARN: sudo sysctl -w net.core.somaxconn=$SOMAXCONN" + echo "WARN: The original net.core.somaxconn value will be set back when the os reboots." + echo "WARN:" + fi + ;; + FreeBSD | Darwin) + somaxconn=$(sysctl -n kern.ipc.somaxconn) + if [ "$somaxconn" -lt $SOMAXCONN ]; then + echo "WARN:" + echo "WARN: the value of kern.ipc.somaxconn (=$somaxconn) is too small, please set it to a larger value using the following command." + echo "WARN: sudo sysctl -w kern.ipc.somaxconn=$SOMAXCONN" + echo "WARN: The original kern.ipc.somaxconn value will be set back when the os reboots." + echo "WARN:" + fi + ;; +esac + +# whether we allow enable heap dump files +IOTDB_ALLOW_HEAP_DUMP="true" + +calculate_memory_sizes() +{ + case "`uname`" in + Linux) + system_memory_in_mb=`free -m| sed -n '2p' | awk '{print $2}'` + system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo` + ;; + FreeBSD) + system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + SunOS) + system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'` + system_cpu_cores=`psrinfo | wc -l` + ;; + Darwin) + system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + *) + # assume reasonable defaults for e.g. a modern desktop or + # cheap server + system_memory_in_mb="2048" + system_cpu_cores="2" + ;; + esac + + # some systems like the raspberry pi don't report cores, use at least 1 + if [ "$system_cpu_cores" -lt "1" ] + then + system_cpu_cores="1" + fi + + # suggest using memory, system memory 3 / 10 + suggest_using_memory_in_mb=`expr $system_memory_in_mb / 10 \* 3` + + if [ -n "$MEMORY_SIZE" ] + then + if [ "${MEMORY_SIZE%"G"}" != "$MEMORY_SIZE" ] || [ "${MEMORY_SIZE%"M"}" != "$MEMORY_SIZE" ] + then + if [ "${MEMORY_SIZE%"G"}" != "$MEMORY_SIZE" ] + then + memory_size_in_mb=`expr ${MEMORY_SIZE%"G"} "*" 1024` + else + memory_size_in_mb=`expr ${MEMORY_SIZE%"M"}` + fi + else + echo "Invalid format of MEMORY_SIZE, please use the format like 2048M or 2G" + exit 1 + fi + else + # set memory size to suggest using memory, if suggest using memory is greater than 8GB, set memory size to 8GB + if [ "$suggest_using_memory_in_mb" -gt "8192" ] + then + memory_size_in_mb="8192" + else + memory_size_in_mb=$suggest_using_memory_in_mb + fi + fi + + # set on heap memory size + # when memory_size_in_mb is less than 4 * 1024, we will set on heap memory size to memory_size_in_mb / 4 * 3 + # when memory_size_in_mb is greater than 4 * 1024 and less than 16 * 1024, we will set on heap memory size to memory_size_in_mb / 5 * 4 + # when memory_size_in_mb is greater than 16 * 1024 and less than 128 * 1024, we will set on heap memory size to memory_size_in_mb / 8 * 7 + # when memory_size_in_mb is greater than 128 * 1024, we will set on heap memory size to memory_size_in_mb - 16 * 1024 + if [ "$memory_size_in_mb" -lt "4096" ] + then + on_heap_memory_size_in_mb=`expr $memory_size_in_mb / 4 \* 3` + elif [ "$memory_size_in_mb" -lt "16384" ] + then + on_heap_memory_size_in_mb=`expr $memory_size_in_mb / 5 \* 4` + elif [ "$memory_size_in_mb" -lt "131072" ] + then + on_heap_memory_size_in_mb=`expr $memory_size_in_mb / 8 \* 7` + else + on_heap_memory_size_in_mb=`expr $memory_size_in_mb - 16384` + fi + off_heap_memory_size_in_mb=`expr $memory_size_in_mb - $on_heap_memory_size_in_mb` + + 
ON_HEAP_MEMORY="${on_heap_memory_size_in_mb}M" + OFF_HEAP_MEMORY="${off_heap_memory_size_in_mb}M" +} + +CONFIGNODE_CONF_DIR="`dirname "$0"`" +get_cn_system_dir() { + local config_file="$1" + local cn_system_dir="" + + cn_system_dir=`sed '/^cn_system_dir=/!d;s/.*=//' ${CONFIGNODE_CONF_DIR}/${config_file} | tail -n 1` + + if [ -z "$cn_system_dir" ]; then + echo "" + return 0 + fi + + if [[ "$cn_system_dir" == /* ]]; then + echo "$cn_system_dir" + else + echo "$CONFIGNODE_CONF_DIR/../$cn_system_dir" + fi +} + +if [ -f "${CONFIGNODE_CONF_DIR}/iotdb-system.properties" ]; then + heap_dump_dir=$(get_cn_system_dir "iotdb-system.properties") +else + heap_dump_dir=$(get_cn_system_dir "iotdb-confignode.properties") +fi + +if [ -z "$heap_dump_dir" ]; then + heap_dump_dir="$(dirname "$0")/../data/confignode/system" +fi + +if [ ! -d "$heap_dump_dir" ]; then + mkdir -p "$heap_dump_dir" +fi + +# find java in JAVA_HOME +if [ -n "$JAVA_HOME" ]; then + for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do + if [ -x "$java" ]; then + JAVA="$java" + break + fi + done +else + JAVA=java +fi + +if [ -z $JAVA ] ; then + echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr + exit 1; +fi + +# Determine the sort of JVM we'll be running on. +java_ver_output=`"$JAVA" -version 2>&1` +jvmver=`echo "$java_ver_output" | grep '[openjdk|java] version' | awk -F'"' 'NR==1 {print $2}' | cut -d\- -f1` +JVM_VERSION=${jvmver%_*} +JVM_PATCH_VERSION=${jvmver#*_} +if [ "$JVM_VERSION" \< "1.8" ] ; then + echo "IoTDB requires Java 8u92 or later." + exit 1; +fi + +if [ "$JVM_VERSION" \< "1.8" ] && [ "$JVM_PATCH_VERSION" -lt 92 ] ; then + echo "IoTDB requires Java 8u92 or later." + exit 1; +fi + +version_arr=(${JVM_VERSION//./ }) + +illegal_access_params="" +#GC log path has to be defined here because it needs to access CONFIGNODE_HOME +if [ "${version_arr[0]}" = "1" ] ; then + # Java 8 + MAJOR_VERSION=${version_arr[1]} + echo "$CONFIGNODE_JMX_OPTS" | grep -q "^-[X]loggc" + if [ "$?" = "1" ] ; then # [X] to prevent ccm from replacing this line + # only add -Xlog:gc if it's not mentioned in jvm-server.options file + mkdir -p ${CONFIGNODE_HOME}/logs + if [ "$#" -ge "1" -a "$1" == "printgc" ]; then + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Xloggc:${CONFIGNODE_HOME}/logs/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M" + fi + fi +else + #JDK 11 and others + MAJOR_VERSION=${version_arr[0]} + # See description of https://bugs.openjdk.java.net/browse/JDK-8046148 for details about the syntax + # The following is the equivalent to -XX:+PrintGCDetails -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M + echo "$CONFIGNODE_JMX_OPTS" | grep -q "^-[X]log:gc" + if [ "$?" 
= "1" ] ; then # [X] to prevent ccm from replacing this line + # only add -Xlog:gc if it's not mentioned in jvm-server.options file + mkdir -p ${CONFIGNODE_HOME}/logs + if [ "$#" -ge "1" -a "$1" == "printgc" ]; then + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Xlog:gc=info,heap*=info,age*=info,safepoint=info,promotion*=info:file=${CONFIGNODE_HOME}/logs/gc.log:time,uptime,pid,tid,level:filecount=10,filesize=10485760" + fi + fi + # Add argLine for Java 11 and above, due to [JEP 396: Strongly Encapsulate JDK Internals by Default] (https://openjdk.java.net/jeps/396) + illegal_access_params="$illegal_access_params --add-opens=java.base/java.util.concurrent=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.lang=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.util=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.nio=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.io=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.net=ALL-UNNAMED" +fi + + +calculate_memory_sizes + +# on heap memory size +#ON_HEAP_MEMORY="2G" +# off heap memory size +#OFF_HEAP_MEMORY="512M" + +if [ "${OFF_HEAP_MEMORY%"G"}" != "$OFF_HEAP_MEMORY" ] +then + off_heap_memory_size_in_mb=`expr ${OFF_HEAP_MEMORY%"G"} "*" 1024` +else + off_heap_memory_size_in_mb=`expr ${OFF_HEAP_MEMORY%"M"}` +fi + +# threads number of io +IO_THREADS_NUMBER="100" +# Max cached buffer size, Note: unit can only be B! +# which equals OFF_HEAP_MEMORY / IO_THREADS_NUMBER +MAX_CACHED_BUFFER_SIZE=`expr $off_heap_memory_size_in_mb \* 1024 \* 1024 / $IO_THREADS_NUMBER` + +#true or false +#DO NOT FORGET TO MODIFY THE PASSWORD FOR SECURITY (${CONFIGNODE_CONF}/jmx.password and ${CONFIGNODE_CONF}/jmx.access) +#If you want to connect JMX Service by network in local machine, such as nodeTool.sh will try to connect 127.0.0.1:31999, please set JMX_LOCAL to false. +JMX_LOCAL="true" + +JMX_PORT="32000" +#only take effect when the jmx_local=false +#You need to change this IP as a public IP if you want to remotely connect IoTDB ConfigNode by JMX. +# 0.0.0.0 is not allowed +JMX_IP="127.0.0.1" + +if [ ${JMX_LOCAL} = "false" ]; then + echo "setting remote JMX..." + #you may have no permission to run chmod. If so, contact your system administrator. + chmod 600 ${CONFIGNODE_CONF}/jmx.password + chmod 600 ${CONFIGNODE_CONF}/jmx.access + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote" + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT" + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Djava.rmi.server.randomIDs=true" + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.ssl=false" + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.authenticate=true" + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.password.file=${CONFIGNODE_CONF}/jmx.password" + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Dcom.sun.management.jmxremote.access.file=${CONFIGNODE_CONF}/jmx.access" + CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Djava.rmi.server.hostname=$JMX_IP" +else + echo "setting local JMX..." 
+fi
+
+CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Diotdb.jmx.local=$JMX_LOCAL"
+CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Xms${ON_HEAP_MEMORY}"
+CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Xmx${ON_HEAP_MEMORY}"
+CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -XX:MaxDirectMemorySize=${OFF_HEAP_MEMORY}"
+CONFIGNODE_JMX_OPTS="$CONFIGNODE_JMX_OPTS -Djdk.nio.maxCachedBufferSize=${MAX_CACHED_BUFFER_SIZE}"
+IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+CrashOnOutOfMemoryError"
+# if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance
+#IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${heap_dump_dir}/confignode_heapdump.hprof"
+
+echo "ConfigNode on heap memory size = ${ON_HEAP_MEMORY}B, off heap memory size = ${OFF_HEAP_MEMORY}B"
+echo "If you want to change this configuration, please check conf/confignode-env.sh."
+
diff --git a/docker/data/iotdb/conf/datanode-env.bat b/docker/data/iotdb/conf/datanode-env.bat
new file mode 100644
index 00000000..16921072
--- /dev/null
+++ b/docker/data/iotdb/conf/datanode-env.bat
@@ -0,0 +1,187 @@
+@REM
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements. See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership. The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License. You may obtain a copy of the License at
+@REM
+@REM http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied. See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM
+
+@echo off
+
+@REM You can set datanode memory size, example '2G' or '2048M'
+set MEMORY_SIZE=
+
+@REM true or false
+@REM DO NOT FORGET TO MODIFY THE PASSWORD FOR SECURITY (%IOTDB_CONF%\jmx.password and %IOTDB_CONF%\jmx.access)
+set JMX_LOCAL="true"
+set JMX_PORT="31999"
+@REM only take effect when the jmx_local=false
+@REM You need to change this IP as a public IP if you want to remotely connect IoTDB by JMX.
+@REM 0.0.0.0 is not allowed
+set JMX_IP="127.0.0.1"
+
+if %JMX_LOCAL% == "false" (
+ echo "setting remote JMX..."
+ @REM you may have no permission to run chmod. If so, contact your system administrator.
+ set IOTDB_JMX_OPTS=-Dcom.sun.management.jmxremote^
+ -Dcom.sun.management.jmxremote.port=%JMX_PORT%^
+ -Dcom.sun.management.jmxremote.rmi.port=%JMX_PORT%^
+ -Djava.rmi.server.randomIDs=true^
+ -Dcom.sun.management.jmxremote.ssl=false^
+ -Dcom.sun.management.jmxremote.authenticate=false^
+ -Dcom.sun.management.jmxremote.password.file="%IOTDB_CONF%\jmx.password"^
+ -Dcom.sun.management.jmxremote.access.file="%IOTDB_CONF%\jmx.access"^
+ -Djava.rmi.server.hostname=%JMX_IP%
+) else (
+ echo "setting local JMX..."
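+ @REM Note: per the shell variant of this script, tools such as nodeTool.sh
+ @REM connect to 127.0.0.1:31999 and need JMX_LOCAL set to "false" above;
+ @REM keep JMX_PORT in sync if you change that default.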
+) + +set IOTDB_JMX_OPTS=%IOTDB_JMX_OPTS% -Diotdb.jmx.local=%JMX_LOCAL% + +for /f %%b in ('wmic cpu get numberofcores ^| findstr "[0-9]"') do ( + set system_cpu_cores=%%b +) + +if %system_cpu_cores% LSS 1 set system_cpu_cores=1 + +for /f %%b in ('wmic ComputerSystem get TotalPhysicalMemory ^| findstr "[0-9]"') do ( + set system_memory=%%b +) + +echo wsh.echo FormatNumber(cdbl(%system_memory%)/(1024*1024), 0) > "%IOTDB_HOME%\sbin\tmp.vbs" +for /f "tokens=*" %%a in ('cscript //nologo "%IOTDB_HOME%\sbin\tmp.vbs"') do set system_memory_in_mb=%%a +del "%IOTDB_HOME%\sbin\tmp.vbs" +set system_memory_in_mb=%system_memory_in_mb:,=% + +set /a suggest_=%system_memory_in_mb%/2 + +if "%MEMORY_SIZE%"=="" ( + set /a memory_size_in_mb=%suggest_% +) else ( + if "%MEMORY_SIZE:~-1%"=="M" ( + set /a memory_size_in_mb=%MEMORY_SIZE:~0,-1% + ) else if "%MEMORY_SIZE:~-1%"=="G" ( + set /a memory_size_in_mb=%MEMORY_SIZE:~0,-1%*1024 + ) else ( + echo "Invalid format of MEMORY_SIZE, please use the format like 2048M or 2G." + exit /b 1 + ) +) + +@REM set on heap memory size +@REM when memory_size_in_mb is less than 4 * 1024, we will set on heap memory size to memory_size_in_mb / 4 * 3 +@REM when memory_size_in_mb is greater than 4 * 1024 and less than 16 * 1024, we will set on heap memory size to memory_size_in_mb / 5 * 4 +@REM when memory_size_in_mb is greater than 16 * 1024 and less than 128 * 1024, we will set on heap memory size to memory_size_in_mb / 8 * 7 +@REM when memory_size_in_mb is greater than 128 * 1024, we will set on heap memory size to memory_size_in_mb - 16 * 1024 +if %memory_size_in_mb% LSS 4096 ( + set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/4*3 +) else if %memory_size_in_mb% LSS 16384 ( + set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/5*4 +) else if %memory_size_in_mb% LSS 131072 ( + set /a on_heap_memory_size_in_mb=%memory_size_in_mb%/8*7 +) else ( + set /a on_heap_memory_size_in_mb=%memory_size_in_mb%-16384 +) +set /a off_heap_memory_size_in_mb=%memory_size_in_mb%-%on_heap_memory_size_in_mb% + +set ON_HEAP_MEMORY=%on_heap_memory_size_in_mb%M +set OFF_HEAP_MEMORY=%off_heap_memory_size_in_mb%M + +set IOTDB_ALLOW_HEAP_DUMP="true" + +@REM on heap memory size +@REM set ON_HEAP_MEMORY=2G +@REM off heap memory size +@REM set OFF_HEAP_MEMORY=512M + +if "%OFF_HEAP_MEMORY:~-1%"=="M" ( + set /a off_heap_memory_size_in_mb=%OFF_HEAP_MEMORY:~0,-1% + ) else if "%OFF_HEAP_MEMORY:~-1%"=="G" ( + set /a off_heap_memory_size_in_mb=%OFF_HEAP_MEMORY:~0,-1%*1024 + ) + +@REM threads number of io +set IO_THREADS_NUMBER=1000 +@REM Max cached buffer size, Note: unit can only be B! +@REM which equals OFF_HEAP_MEMORY / IO_THREADS_NUMBER +set /a MAX_CACHED_BUFFER_SIZE=%off_heap_memory_size_in_mb%/%IO_THREADS_NUMBER%*1024*1024 + +set IOTDB_HEAP_OPTS=-Xmx%ON_HEAP_MEMORY% -Xms%ON_HEAP_MEMORY% +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:MaxDirectMemorySize=%OFF_HEAP_MEMORY% +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -Djdk.nio.maxCachedBufferSize=%MAX_CACHED_BUFFER_SIZE% +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+CrashOnOutOfMemoryError +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+UseAdaptiveSizePolicy +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -Xss512k +@REM options below try to optimize safepoint stw time. +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+UnlockDiagnosticVMOptions +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:GuaranteedSafepointInterval=0 +@REM these two options print safepoints with pauses longer than 1000ms to the standard output. 
You can see these logs via redirection when starting in the background like "start-datanode.sh > log_datanode_safepoint.txt" +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:SafepointTimeoutDelay=1000 +set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+SafepointTimeout + +@REM option below tries to optimize safepoint stw time for large counted loop. +@REM NOTE: it may have an impact on JIT's black-box optimization. +@REM set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+UseCountedLoopSafepoints + +@REM When the GC time is too long, if there are remaining CPU resources, you can try to turn on and increase options below. +@REM for /F "tokens=2 delims==" %%I in ('wmic cpu get NumberOfCores /value') do ( +@REM set "CPU_PROCESSOR_NUM=%%I" +@REM ) +@REM set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:ParallelGCThreads=%CPU_PROCESSOR_NUM% + +@REM if there are much of stw time of reference process in GC log, you can turn on option below. +@REM NOTE: it may have an impact on application's throughput. +@REM set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+ParallelRefProcEnabled + +@REM this option can reduce the overhead caused by memory allocation, page fault interrupts, etc. during JVM operation. +@REM NOTE: it may reduce memory utilization and trigger OOM killer when memory is tight. +@REM set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -XX:+AlwaysPreTouch + +@REM if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance +@REM set IOTDB_JMX_OPTS=%IOTDB_HEAP_OPTS% -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=\tmp\datanode_heapdump.hprof + +@REM You can put your env variable here +@REM set JAVA_HOME=%JAVA_HOME% + +@REM set gc log. +IF "%1" equ "printgc" ( + IF "%JAVA_VERSION%" == "8" ( + md "%IOTDB_HOME%\logs" + set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -Xloggc:"%IOTDB_HOME%\logs\gc.log" -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M + @REM For more detailed GC information, you can uncomment option below. + @REM NOTE: more detailed GC information may bring larger GC log files. + @REM set IOTDB_JMX_OPTS=%IOTDB_JMX_OPTS% -Xloggc:"%IOTDB_HOME%\logs\gc.log" -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:+PrintTenuringDistribution -XX:+PrintHeapAtGC -XX:+PrintReferenceGC -XX:+PrintSafepointStatistics -XX:PrintSafepointStatisticsCount=1 -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M + ) ELSE ( + md "%IOTDB_HOME%\logs" + set IOTDB_HEAP_OPTS=%IOTDB_HEAP_OPTS% -Xlog:gc=info,heap*=trace,age*=debug,safepoint=info,promotion*=trace:file="%IOTDB_HOME%\logs\gc.log":time,uptime,pid,tid,level:filecount=10,filesize=10485760 + @REM For more detailed GC information, you can uncomment option below. + @REM NOTE: more detailed GC information may bring larger GC log files. 
+ @REM set IOTDB_JMX_OPTS=%IOTDB_JMX_OPTS% -Xlog:gc*=debug,heap*=debug,age*=trace,metaspace*=info,safepoint*=debug,promotion*=info:file="%IOTDB_HOME%\logs\gc.log":time,uptime,pid,tid,level,tags:filecount=10,filesize=100M + ) +) + +@REM Add args for Java 11 and above, due to [JEP 396: Strongly Encapsulate JDK Internals by Default] (https://openjdk.java.net/jeps/396) +IF "%JAVA_VERSION%" == "8" ( + set ILLEGAL_ACCESS_PARAMS= +) ELSE ( + set ILLEGAL_ACCESS_PARAMS=--add-opens=java.base/java.util.concurrent=ALL-UNNAMED^ + --add-opens=java.base/java.lang=ALL-UNNAMED^ + --add-opens=java.base/java.util=ALL-UNNAMED^ + --add-opens=java.base/java.nio=ALL-UNNAMED^ + --add-opens=java.base/java.io=ALL-UNNAMED^ + --add-opens=java.base/java.net=ALL-UNNAMED +) + +echo DataNode on heap memory size = %ON_HEAP_MEMORY%B, off heap memory size = %OFF_HEAP_MEMORY%B +echo If you want to change this configuration, please check conf\datanode-env.bat. diff --git a/docker/data/iotdb/conf/datanode-env.sh b/docker/data/iotdb/conf/datanode-env.sh new file mode 100644 index 00000000..aea7cbcd --- /dev/null +++ b/docker/data/iotdb/conf/datanode-env.sh @@ -0,0 +1,351 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# You can set DataNode memory size, example '2G' or '2048M' +MEMORY_SIZE= + +# You can put your env variable here +# export JAVA_HOME=$JAVA_HOME + +# Set max number of open files +max_num=$(ulimit -n) +if [ $max_num -le 65535 ]; then + ulimit -n 65535 + if [ $? -ne 0 ]; then + echo "Warning: Failed to set max number of files to be 65535, maybe you need to use 'sudo ulimit -n 65535' to set it when you use iotdb in production environments." + fi +fi + +# Set somaxconn to a better value to avoid meaningless connection reset issues when the system is under high load. +# The original somaxconn will be set back when the system reboots. +# For more detail, see: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=19f92a030ca6d772ab44b22ee6a01378a8cb32d4 +SOMAXCONN=65535 +case "$(uname)" in + Linux) + somaxconn=$(sysctl -n net.core.somaxconn) + if [ "$somaxconn" -lt $SOMAXCONN ]; then + echo "WARN:" + echo "WARN: the value of net.core.somaxconn (=$somaxconn) is too small, please set it to a larger value using the following command." + echo "WARN: sudo sysctl -w net.core.somaxconn=$SOMAXCONN" + echo "WARN: The original net.core.somaxconn value will be set back when the os reboots." + echo "WARN:" + fi + ;; + FreeBSD | Darwin) + somaxconn=$(sysctl -n kern.ipc.somaxconn) + if [ "$somaxconn" -lt $SOMAXCONN ]; then + echo "WARN:" + echo "WARN: the value of kern.ipc.somaxconn (=$somaxconn) is too small, please set it to a larger value using the following command." 
+ echo "WARN: sudo sysctl -w kern.ipc.somaxconn=$SOMAXCONN" + echo "WARN: The original kern.ipc.somaxconn value will be set back when the os reboots." + echo "WARN:" + fi + ;; +esac + +# whether we allow enable heap dump files +IOTDB_ALLOW_HEAP_DUMP="true" + +calculate_memory_sizes() +{ + case "`uname`" in + Linux) + system_memory_in_mb=`free -m| sed -n '2p' | awk '{print $2}'` + system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo` + ;; + FreeBSD) + system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + SunOS) + system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'` + system_cpu_cores=`psrinfo | wc -l` + ;; + Darwin) + system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + *) + # assume reasonable defaults for e.g. a modern desktop or + # cheap server + system_memory_in_mb="2048" + system_cpu_cores="2" + ;; + esac + + # some systems like the raspberry pi don't report cores, use at least 1 + if [ "$system_cpu_cores" -lt "1" ] + then + system_cpu_cores="1" + fi + + # suggest using memory, system memory 1 / 2 + suggest_using_memory_in_mb=`expr $system_memory_in_mb / 2` + + if [ -n "$MEMORY_SIZE" ] + then + if [ "${MEMORY_SIZE%"G"}" != "$MEMORY_SIZE" ] || [ "${MEMORY_SIZE%"M"}" != "$MEMORY_SIZE" ] + then + if [ "${MEMORY_SIZE%"G"}" != "$MEMORY_SIZE" ] + then + memory_size_in_mb=`expr ${MEMORY_SIZE%"G"} "*" 1024` + else + memory_size_in_mb=`expr ${MEMORY_SIZE%"M"}` + fi + else + echo "Invalid format of MEMORY_SIZE, please use the format like 2048M or 2G" + exit 1 + fi + else + memory_size_in_mb=$suggest_using_memory_in_mb + fi + + # set on heap memory size + # when memory_size_in_mb is less than 4 * 1024, we will set on heap memory size to memory_size_in_mb / 4 * 3 + # when memory_size_in_mb is greater than 4 * 1024 and less than 16 * 1024, we will set on heap memory size to memory_size_in_mb / 5 * 4 + # when memory_size_in_mb is greater than 16 * 1024 and less than 128 * 1024, we will set on heap memory size to memory_size_in_mb / 8 * 7 + # when memory_size_in_mb is greater than 128 * 1024, we will set on heap memory size to memory_size_in_mb - 16 * 1024 + if [ "$memory_size_in_mb" -lt "4096" ] + then + on_heap_memory_size_in_mb=`expr $memory_size_in_mb / 4 \* 3` + elif [ "$memory_size_in_mb" -lt "16384" ] + then + on_heap_memory_size_in_mb=`expr $memory_size_in_mb / 5 \* 4` + elif [ "$memory_size_in_mb" -lt "131072" ] + then + on_heap_memory_size_in_mb=`expr $memory_size_in_mb / 8 \* 7` + else + on_heap_memory_size_in_mb=`expr $memory_size_in_mb - 16384` + fi + off_heap_memory_size_in_mb=`expr $memory_size_in_mb - $on_heap_memory_size_in_mb` + + ON_HEAP_MEMORY="${on_heap_memory_size_in_mb}M" + OFF_HEAP_MEMORY="${off_heap_memory_size_in_mb}M" +} + + +DATANODE_CONF_DIR="`dirname "$0"`" +# find first dir of dn_data_dirs from properties file +get_first_data_dir() { + local config_file="$1" + local data_dir_value="" + + data_dir_value=`sed '/^dn_data_dirs=/!d;s/.*=//' ${DATANODE_CONF_DIR}/${config_file} | tail -n 1` + + if [ -z "$data_dir_value" ]; then + echo "" + return 0 + fi + + local first_dir="" + + if [[ "$data_dir_value" == *";"* ]]; then + first_dir=$(echo "$data_dir_value" | cut -d';' -f1) + fi + if [[ "$first_dir" == *","* ]]; then + first_dir=$(echo "$first_dir" | cut -d',' -f1) + fi + + if 
[[ "$first_dir" == /* ]]; then + echo "$first_dir" + else + echo "$DATANODE_CONF_DIR/../$first_dir" + fi +} + +if [ -f "${DATANODE_CONF_DIR}/iotdb-system.properties" ]; then + heap_dump_dir=$(get_first_data_dir "iotdb-system.properties") +else + heap_dump_dir=$(get_first_data_dir "iotdb-datanode.properties") +fi + +if [ -z "$heap_dump_dir" ]; then + heap_dump_dir="$(dirname "$0")/../data/datanode/data" +fi +if [ ! -d "$heap_dump_dir" ]; then + mkdir -p "$heap_dump_dir" +fi + +# find java in JAVA_HOME +if [ -n "$JAVA_HOME" ]; then + for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do + if [ -x "$java" ]; then + JAVA="$java" + break + fi + done +else + JAVA=java +fi + +if [ -z $JAVA ] ; then + echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr + exit 1; +fi + +# Determine the sort of JVM we'll be running on. +java_ver_output=`"$JAVA" -version 2>&1` +jvmver=`echo "$java_ver_output" | grep '[openjdk|java] version' | awk -F'"' 'NR==1 {print $2}' | cut -d\- -f1` +JVM_VERSION=${jvmver%_*} +JVM_PATCH_VERSION=${jvmver#*_} +if [ "$JVM_VERSION" \< "1.8" ] ; then + echo "IoTDB requires Java 8u92 or later." + exit 1; +fi + +if [ "$JVM_VERSION" \< "1.8" ] && [ "$JVM_PATCH_VERSION" -lt 92 ] ; then + echo "IoTDB requires Java 8u92 or later." + exit 1; +fi + +version_arr=(${JVM_VERSION//./ }) + +illegal_access_params="" +#GC log path has to be defined here because it needs to access IOTDB_HOME +if [ "${version_arr[0]}" = "1" ] ; then + # Java 8 + MAJOR_VERSION=${version_arr[1]} + echo "$IOTDB_JMX_OPTS" | grep -q "^-[X]loggc" + if [ "$?" = "1" ] ; then # [X] to prevent ccm from replacing this line + # only add -Xlog:gc if it's not mentioned in jvm-server.options file + mkdir -p ${IOTDB_HOME}/logs + if [ "$#" -ge "1" -a "$1" == "printgc" ]; then + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xloggc:${IOTDB_HOME}/logs/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M" + # For more detailed GC information, you can uncomment option below. + # NOTE: more detailed GC information may bring larger GC log files. + # IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xloggc:${IOTDB_HOME}/logs/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+UseGCLogFileRotation -XX:+PrintTenuringDistribution -XX:+PrintHeapAtGC -XX:+PrintReferenceGC -XX:+PrintSafepointStatistics -XX:PrintSafepointStatisticsCount=1 -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M" + fi + fi +else + #JDK 11 and others + MAJOR_VERSION=${version_arr[0]} + # See description of https://bugs.openjdk.java.net/browse/JDK-8046148 for details about the syntax + # The following is the equivalent to -XX:+PrintGCDetails -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10M + echo "$IOTDB_JMX_OPTS" | grep -q "^-[X]log:gc" + if [ "$?" = "1" ] ; then # [X] to prevent ccm from replacing this line + # only add -Xlog:gc if it's not mentioned in jvm-server.options file + mkdir -p ${IOTDB_HOME}/logs + if [ "$#" -ge "1" -a "$1" == "printgc" ]; then + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xlog:gc=info,heap*=info,age*=info,safepoint=info,promotion*=info:file=${IOTDB_HOME}/logs/gc.log:time,uptime,pid,tid,level:filecount=10,filesize=10485760" + # For more detailed GC information, you can uncomment option below. + # NOTE: more detailed GC information may bring larger GC log files. 
+ # IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xlog:gc*=debug,heap*=debug,age*=trace,metaspace*=info,safepoint*=debug,promotion*=info:file=${IOTDB_HOME}/logs/gc.log:time,uptime,pid,tid,level,tags:filecount=10,filesize=100M" + fi + fi + # Add argLine for Java 11 and above, due to [JEP 396: Strongly Encapsulate JDK Internals by Default] (https://openjdk.java.net/jeps/396) + illegal_access_params="$illegal_access_params --add-opens=java.base/java.util.concurrent=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.lang=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.util=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.nio=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.io=ALL-UNNAMED" + illegal_access_params="$illegal_access_params --add-opens=java.base/java.net=ALL-UNNAMED" +fi + + +calculate_memory_sizes + +# on heap memory size +#ON_HEAP_MEMORY="2G" +# off heap memory size +#OFF_HEAP_MEMORY="512M" + + +if [ "${OFF_HEAP_MEMORY%"G"}" != "$OFF_HEAP_MEMORY" ] +then + off_heap_memory_size_in_mb=`expr ${OFF_HEAP_MEMORY%"G"} "*" 1024` +else + off_heap_memory_size_in_mb=`expr ${OFF_HEAP_MEMORY%"M"}` +fi + +# threads number for io +IO_THREADS_NUMBER="1000" +# Max cached buffer size, Note: unit can only be B! +# which equals OFF_HEAP_MEMORY / IO_THREADS_NUMBER +MAX_CACHED_BUFFER_SIZE=`expr $off_heap_memory_size_in_mb \* 1024 \* 1024 / $IO_THREADS_NUMBER` + +#true or false +#DO NOT FORGET TO MODIFY THE PASSWORD FOR SECURITY (${IOTDB_CONF}/jmx.password and ${IOTDB_CONF}/jmx.access) +#If you want to connect JMX Service by network in local machine, such as nodeTool.sh will try to connect 127.0.0.1:31999, please set JMX_LOCAL to false. +JMX_LOCAL="true" + +JMX_PORT="31999" +#only take effect when the jmx_local=false +#You need to change this IP as a public IP if you want to remotely connect IoTDB by JMX. +# 0.0.0.0 is not allowed +JMX_IP="127.0.0.1" + +if [ ${JMX_LOCAL} = "false" ]; then + echo "setting remote JMX..." + #you may have no permission to run chmod. If so, contact your system administrator. + chmod 600 ${IOTDB_CONF}/jmx.password + chmod 600 ${IOTDB_CONF}/jmx.access + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote" + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT" + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Djava.rmi.server.randomIDs=true" + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.ssl=false" + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.authenticate=true" + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.password.file=${IOTDB_CONF}/jmx.password" + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Dcom.sun.management.jmxremote.access.file=${IOTDB_CONF}/jmx.access" + IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Djava.rmi.server.hostname=$JMX_IP" +else + echo "setting local JMX..." 
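+  # Illustrative note: when JMX_LOCAL is "false" above, remote tools (e.g.
+  # jconsole, or nodeTool.sh as mentioned in the comment above) attach via
+  #   service:jmx:rmi:///jndi/rmi://<JMX_IP>:31999/jmxrmi
+  # with credentials from ${IOTDB_CONF}/jmx.password; <JMX_IP> stands for the
+  # address configured above.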
+fi + +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Diotdb.jmx.local=$JMX_LOCAL" +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xms${ON_HEAP_MEMORY}" +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xmx${ON_HEAP_MEMORY}" +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:MaxDirectMemorySize=${OFF_HEAP_MEMORY}" +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Djdk.nio.maxCachedBufferSize=${MAX_CACHED_BUFFER_SIZE}" +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+CrashOnOutOfMemoryError" +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+UseAdaptiveSizePolicy" +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -Xss512k" +# these two options print safepoints with pauses longer than 1000ms to the standard output. You can see these logs via redirection when starting in the background like "start-datanode.sh > log_datanode_safepoint.log" +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:SafepointTimeoutDelay=1000" +IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+SafepointTimeout" + +# option below tries to optimize safepoint stw time for large counted loop. +# NOTE: it may have an impact on JIT's black-box optimization. +# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+UseCountedLoopSafepoints" + +# when the GC time is too long, if there are remaining CPU resources, you can try to turn on and increase options below. +# for Linux: +# CPU_PROCESSOR_NUM=$(nproc) +# for MacOS: +# CPU_PROCESSOR_NUM=$(sysctl -n hw.ncpu) +# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:ParallelGCThreads=${CPU_PROCESSOR_NUM}" + +# if there are much of stw time of reference process in GC log, you can turn on option below. +# NOTE: it may have an impact on application's throughput. +# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+ParallelRefProcEnabled" + +# this option can reduce the overhead caused by memory allocation, page fault interrupts, etc. during JVM operation. +# NOTE: it may reduce memory utilization and trigger OOM killer when memory is tight. +# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+AlwaysPreTouch" + +# if you want to dump the heap memory while OOM happening, you can use the following command, remember to replace /tmp/heapdump.hprof with your own file path and the folder where this file is located needs to be created in advance +# IOTDB_JMX_OPTS="$IOTDB_JMX_OPTS -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${heap_dump_dir}/datanode_heapdump.hprof" + + +echo "DataNode on heap memory size = ${ON_HEAP_MEMORY}B, off heap memory size = ${OFF_HEAP_MEMORY}B" +echo "If you want to change this configuration, please check conf/datanode-env.sh." + diff --git a/docker/data/iotdb/conf/iotdb-cluster.properties b/docker/data/iotdb/conf/iotdb-cluster.properties new file mode 100644 index 00000000..b565be09 --- /dev/null +++ b/docker/data/iotdb/conf/iotdb-cluster.properties @@ -0,0 +1,33 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# This configuration file needs to be configured only when the start-all.sh,stop-all.sh, and destroy.sh scripts are required. +# You also need to modify this configuration file when the cluster nodes change + +# Configure ConfigNodes machine addresses separated by , +confignode_address_list= +# Configure DataNodes machine addresses separated by , +datanode_address_list= +# User name for logging in to the deployment machine using ssh +ssh_account=root +# ssh login port +ssh_port=22 +# iotdb deployment directory (iotdb should be deployed to the following folders in all machines) +confignode_deploy_path= +datanode_deploy_path= \ No newline at end of file diff --git a/docker/data/iotdb/conf/iotdb-system.properties b/docker/data/iotdb/conf/iotdb-system.properties new file mode 100644 index 00000000..c51f948a --- /dev/null +++ b/docker/data/iotdb/conf/iotdb-system.properties @@ -0,0 +1,72 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +#################### +### Cluster Configuration +#################### + +cluster_name=defaultCluster + +#################### +### Seed ConfigNode +#################### + +cn_seed_config_node=127.0.0.1:10710 + +dn_seed_config_node=127.0.0.1:10710 + +#################### +### Node RPC Configuration +#################### + +cn_internal_address=127.0.0.1 +cn_internal_port=10710 +cn_consensus_port=10720 + +dn_rpc_address=0.0.0.0 +dn_rpc_port=6667 +dn_internal_address=127.0.0.1 +dn_internal_port=10730 +dn_mpp_data_exchange_port=10740 +dn_schema_region_consensus_port=10750 +dn_data_region_consensus_port=10760 + +#################### +### Replication configuration +#################### + +schema_replication_factor=1 +data_replication_factor=1 + +#################### +### Directory Configuration +#################### + +# dn_data_dirs=data/datanode/data +# dn_wal_dirs=data/datanode/wal + +#################### +### Metric Configuration +#################### + +# cn_metric_reporter_list= +cn_metric_prometheus_reporter_port=9091 + +# dn_metric_reporter_list= +dn_metric_prometheus_reporter_port=9092 diff --git a/docker/data/iotdb/conf/iotdb-system.properties.template b/docker/data/iotdb/conf/iotdb-system.properties.template new file mode 100644 index 00000000..3e14cc55 --- /dev/null +++ b/docker/data/iotdb/conf/iotdb-system.properties.template @@ -0,0 +1,1957 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +#################### +### Cluster Configuration +#################### + +# Used for indicate cluster name and distinguish different cluster. +# If you need to modify the cluster name, it's recommended to use 'set configuration "cluster_name=xxx"' sql. +# Manually modifying configuration file is not recommended, which may cause node restart fail. +# effectiveMode: hot_reload +# Datatype: string +cluster_name=defaultCluster + +#################### +### Seed ConfigNode +#################### + +# For the first ConfigNode to start, cn_seed_config_node points to its own cn_internal_address:cn_internal_port. +# For other ConfigNodes that to join the cluster, cn_seed_config_node points to any running ConfigNode's cn_internal_address:cn_internal_port. +# Note: After this ConfigNode successfully joins the cluster for the first time, this parameter is no longer used. +# Each node automatically maintains the list of ConfigNodes and traverses connections when restarting. +# Format: address:port e.g. 127.0.0.1:10710 +# effectiveMode: first_start +# Datatype: String +cn_seed_config_node=127.0.0.1:10710 + +# dn_seed_config_node points to any running ConfigNode's cn_internal_address:cn_internal_port. +# Note: After this DataNode successfully joins the cluster for the first time, this parameter is no longer used. +# Each node automatically maintains the list of ConfigNodes and traverses connections when restarting. +# Format: address:port e.g. 127.0.0.1:10710 +# effectiveMode: first_start +# Datatype: String +dn_seed_config_node=127.0.0.1:10710 + +#################### +### Node RPC Configuration +#################### + +# Used for RPC communication inside cluster. +# Could set 127.0.0.1(for local test) or ipv4 address. +# effectiveMode: first_start +# Datatype: String +cn_internal_address=127.0.0.1 + +# Used for RPC communication inside cluster. +# effectiveMode: first_start +# Datatype: int +cn_internal_port=10710 + +# Used for consensus communication among ConfigNodes inside cluster. +# effectiveMode: first_start +# Datatype: int +cn_consensus_port=10720 + +# Used for connection of IoTDB native clients(Session) +# Could set 127.0.0.1(for local test) or ipv4 address +# effectiveMode: restart +# Datatype: String +dn_rpc_address=0.0.0.0 + +# Used for connection of IoTDB native clients(Session) +# Bind with dn_rpc_address +# effectiveMode: restart +# Datatype: int +dn_rpc_port=6667 + +# Used for communication inside cluster. +# could set 127.0.0.1(for local test) or ipv4 address. +# effectiveMode: first_start +# Datatype: String +dn_internal_address=127.0.0.1 + +# Used for communication inside cluster. +# Bind with dn_internal_address +# effectiveMode: first_start +# Datatype: int +dn_internal_port=10730 + +# Port for data exchange among DataNodes inside cluster +# Bind with dn_internal_address +# effectiveMode: first_start +# Datatype: int +dn_mpp_data_exchange_port=10740 + +# port for consensus's communication for schema region inside cluster. 
+# Bind with dn_internal_address +# effectiveMode: first_start +# Datatype: int +dn_schema_region_consensus_port=10750 + +# port for consensus's communication for data region inside cluster. +# Bind with dn_internal_address +# effectiveMode: first_start +# Datatype: int +dn_data_region_consensus_port=10760 + +# The time of data node waiting for the next retry to join into the cluster. +# effectiveMode: restart +# Datatype: long +dn_join_cluster_retry_interval_ms=5000 + +#################### +### Replication configuration +#################### + +# ConfigNode consensus protocol type. +# This parameter is unmodifiable after ConfigNode starts for the first time. +# These consensus protocols are currently supported: +# 1. org.apache.iotdb.consensus.ratis.RatisConsensus +# 2. org.apache.iotdb.consensus.simple.SimpleConsensus (Only 1 ConfigNode can be deployed) +# effectiveMode: first_start +# Datatype: string +config_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus + +# Default number of schema replicas +# effectiveMode: first_start +# Datatype: int +schema_replication_factor=1 + +# SchemaRegion consensus protocol type. +# This parameter is unmodifiable after ConfigNode starts for the first time. +# These consensus protocols are currently supported: +# 1. org.apache.iotdb.consensus.ratis.RatisConsensus +# 2. org.apache.iotdb.consensus.simple.SimpleConsensus (The schema_replication_factor can only be set to 1) +# effectiveMode: first_restart +# Datatype: string +schema_region_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus + +# Default number of data replicas +# effectiveMode: first_start +# Datatype: int +data_replication_factor=1 + +# DataRegion consensus protocol type. +# This parameter is unmodifiable after ConfigNode starts for the first time. +# These consensus protocols are currently supported: +# 1. org.apache.iotdb.consensus.simple.SimpleConsensus (The data_replication_factor can only be set to 1) +# 2. org.apache.iotdb.consensus.iot.IoTConsensus +# 3. org.apache.iotdb.consensus.ratis.RatisConsensus +# effectiveMode: first_start +# Datatype: string +data_region_consensus_protocol_class=org.apache.iotdb.consensus.iot.IoTConsensus + +#################### +### Directory configuration +#################### + +# system dir +# If this property is unset, system will save the data in the default relative path directory under the confignode folder(i.e., %CONFIGNODE_HOME%/data/confignode/system). +# If it is absolute, system will save the data in exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the confignode folder. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# cn_system_dir=data\\confignode\\system +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +cn_system_dir=data/confignode/system + +# consensus dir +# If this property is unset, system will save the data in the default relative path directory under the confignode folder(i.e., %CONFIGNODE_HOME%/data/confignode/consensus). +# If it is absolute, system will save the data in exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the confignode folder. 
+# Note: If data_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# cn_consensus_dir=data\\confignode\\consensus +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +cn_consensus_dir=data/confignode/consensus + +# cn_pipe_receiver_file_dir +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/${cn_system_dir}/pipe/receiver). +# If it is absolute, system will save the data in the exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# Note: If cn_pipe_receiver_file_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# cn_pipe_receiver_file_dir=data\\confignode\\system\\pipe\\receiver +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +cn_pipe_receiver_file_dir=data/confignode/system/pipe/receiver + +# system dir +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode/system). +# If it is absolute, system will save the data in exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# dn_system_dir=data\\datanode\\system +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +dn_system_dir=data/datanode/system + + +# data dirs +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode/data). +# If it is absolute, system will save the data in exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# If there are more than one directory, please separate them by commas ",". +# Note: If data_dirs is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: hot_reload +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# dn_data_dirs=data\\datanode\\data +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +dn_data_dirs=data/datanode/data + + +# multi_dir_strategy +# The strategy is used to choose a directory from data_dirs for the system to store a new tsfile. +# System provides two strategies to choose from, or user can create his own strategy by extending org.apache.iotdb.db.conf.directories.strategy.DirectoryStrategy. +# The info of the two strategies are as follows: +# 1. SequenceStrategy: the system will choose the directory in sequence. +# 2. 
MaxDiskUsableSpaceFirstStrategy: the system will choose the directory whose disk has the maximum space. +# Set SequenceStrategy or MaxDiskUsableSpaceFirstStrategy to apply the corresponding strategy. +# If this property is unset, system will use SequenceStrategy as default strategy. +# For this property, fully-qualified class name (include package name) and simple class name are both acceptable. +# effectiveMode: hot_reload +# Datatype: String +dn_multi_dir_strategy=SequenceStrategy + +# consensus dir +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode). +# If it is absolute, system will save the data in the exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# Note: If consensus_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# dn_consensus_dir=data\\datanode\\consensus +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +dn_consensus_dir=data/datanode/consensus + +# wal dirs +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode). +# If it is absolute, system will save the data in the exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# If there are more than one directory, please separate them by commas ",". +# Note: If wal_dirs is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# dn_wal_dirs=data\\datanode\\wal +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +dn_wal_dirs=data/datanode/wal + +# tracing dir +# Uncomment following fields to configure the tracing root directory. +# effectiveMode: restart +# For Windows platform, the index is as follows: +# dn_tracing_dir=datanode\\tracing +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +dn_tracing_dir=datanode/tracing + +# sync dir +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode). +# If it is absolute, system will save the data in the exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# Note: If sync_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# dn_sync_dir=data\\datanode\\sync +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +dn_sync_dir=data/datanode/sync + +# sort_tmp_dir +# This property is used to configure the temporary directory for sorting operation. 
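+# (Illustrative multi-directory layout for the dn_data_dirs and
+# dn_multi_dir_strategy settings above; the paths are assumptions, not defaults:
+#   dn_data_dirs=/ssd1/iotdb/data,/ssd2/iotdb/data
+#   dn_multi_dir_strategy=MaxDiskUsableSpaceFirstStrategy )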
+# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/datanode). +# If it is absolute, system will save the data in the exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# Note: If sort_tmp_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# sort_tmp_dir=data\\datanode\\tmp +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +sort_tmp_dir=data/datanode/tmp + +# dn_pipe_receiver_file_dirs +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/${dn_system_dir}/pipe/receiver). +# If it is absolute, system will save the data in the exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# If there are more than one directory, please separate them by commas ",". +# Note: If dn_pipe_receiver_file_dirs is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# dn_pipe_receiver_file_dirs=data\\datanode\\system\\pipe\\receiver +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +dn_pipe_receiver_file_dirs=data/datanode/system/pipe/receiver + +# pipe_consensus_receiver_file_dirs +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/${dn_system_dir}/pipe/consensus/receiver). +# If it is absolute, system will save the data in the exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# If there are more than one directory, please separate them by commas ",". +# Note: If pipe_consensus_receiver_file_dirs is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# pipe_consensus_receiver_file_dirs=data\\datanode\\system\\pipe\\consensus\\receiver +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +pipe_consensus_receiver_file_dirs=data/datanode/system/pipe/consensus/receiver + +#################### +### Metric Configuration +#################### + +# The reporters of metric module to report metrics +# If there are more than one reporter, please separate them by commas ",". 
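+# (Example of enabling the Prometheus reporter, commented out because it is
+# not the default: cn_metric_reporter_list=PROMETHEUS. A Prometheus server
+# could then scrape http://<confignode-host>:9091/metrics; the host name is an
+# assumption.)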
+# Options: [JMX, PROMETHEUS] +# effectiveMode: restart +# Datatype: String +cn_metric_reporter_list= + +# The level of metric module +# Options: [OFF, CORE, IMPORTANT, NORMAL, ALL] +# effectiveMode: restart +# Datatype: String +cn_metric_level=IMPORTANT + +# The period of async collection of some metrics in second +# effectiveMode: restart +# Datatype: int +cn_metric_async_collect_period=5 + +# The port of prometheus reporter of metric module +# effectiveMode: restart +# Datatype: int +cn_metric_prometheus_reporter_port=9091 + +# The reporters of metric module to report metrics +# If there are more than one reporter, please separate them by commas ",". +# Options: [JMX, PROMETHEUS] +# effectiveMode: restart +# Datatype: String +dn_metric_reporter_list= + +# The level of metric module +# Options: [OFF, CORE, IMPORTANT, NORMAL, ALL] +# effectiveMode: restart +# Datatype: String +dn_metric_level=IMPORTANT + +# The period of async collection of some metrics in second +# effectiveMode: restart +# Datatype: int +dn_metric_async_collect_period=5 + +# The port of prometheus reporter of metric module +# effectiveMode: restart +# Datatype: int +dn_metric_prometheus_reporter_port=9092 + +# The type of internal reporter in metric module, used for checking flushed point number +# Options: [MEMORY, IOTDB] +# effectiveMode: restart +# Datatype: String +dn_metric_internal_reporter_type=MEMORY + +#################### +### SSL Configuration +#################### + +# Does dn_rpc_port enable SSL +# effectiveMode: restart +# Datatype: boolean +enable_thrift_ssl=false + +# Rest Service enabled SSL +# effectiveMode: restart +# Datatype: boolean +enable_https=false + +# SSL key store path +# linux e.g. /home/iotdb/server.keystore (absolute path) or server.keystore (relative path) +# windows e.g. C:\\iotdb\\server.keystore (absolute path) or server.keystore (relative path) +# effectiveMode: restart +key_store_path= + +# SSL key store password +# effectiveMode: restart +# Datatype: String +key_store_pwd= + +#################### +### Connection Configuration +#################### + +# this feature is under development, set this as false before it is done. +# effectiveMode: restart +# Datatype: boolean +cn_rpc_thrift_compression_enable=false + +# The maximum number of concurrent clients that can be connected to the configNode. +# effectiveMode: restart +# Datatype: int +cn_rpc_max_concurrent_client_num=65535 + +# Thrift socket and connection timeout between raft nodes, in milliseconds. +# effectiveMode: restart +# Datatype: int +cn_connection_timeout_ms=60000 + +# selector thread (TAsyncClientManager) nums for async thread in a clientManager +# effectiveMode: restart +# Datatype: int +cn_selector_thread_nums_of_client_manager=1 + +# The maximum number of clients that can be allocated for a node in a clientManager. +# when the number of the client to a single node exceeds this number, the thread for applying for a client will be blocked +# for a while, then ClientManager will throw ClientManagerException if there are no clients after the block time. +# effectiveMode: restart +# Datatype: int +cn_max_client_count_for_each_node_in_client_manager=300 + +# The maximum session idle time. 
unit: ms +# Idle sessions are the ones that performs neither query or non-query operations for a period of time +# Set to 0 to disable session timeout +# effectiveMode: first_start +# Datatype: int +dn_session_timeout_threshold=0 + +# whether enable thrift compression +# effectiveMode: restart +# Datatype: boolean +dn_rpc_thrift_compression_enable=false + +# if true, a snappy based compression method will be called before sending data by the network +# effectiveMode: restart +# Datatype: boolean +# this feature is under development, set this as false before it is done. +dn_rpc_advanced_compression_enable=false + +# the number of rpc selector +# effectiveMode: restart +# Datatype: int +dn_rpc_selector_thread_count=1 + +# The min number of concurrent clients that can be connected to the dataNode. +# effectiveMode: restart +# Datatype: int +dn_rpc_min_concurrent_client_num=1 + +# The maximum number of concurrent clients that can be connected to the dataNode. +# effectiveMode: restart +# Datatype: int +dn_rpc_max_concurrent_client_num=65535 + +# thrift max frame size, 512MB by default +# effectiveMode: restart +# Datatype: int +dn_thrift_max_frame_size=536870912 + +# thrift init buffer size +# effectiveMode: restart +# Datatype: int +dn_thrift_init_buffer_size=1024 + +# Thrift socket and connection timeout between raft nodes, in milliseconds. +# effectiveMode: restart +# Datatype: int +dn_connection_timeout_ms=60000 + +# selector thread (TAsyncClientManager) nums for async thread in a clientManager +# effectiveMode: restart +# Datatype: int +dn_selector_thread_count_of_client_manager=1 + +# The maximum number of clients that can be allocated for a node in a clientManager. +# When the number of the client to a single node exceeds this number, the thread for applying for a client will be blocked +# for a while, then ClientManager will throw ClientManagerException if there are no clients after the block time. +# effectiveMode: restart +# Datatype: int +dn_max_client_count_for_each_node_in_client_manager=300 + +#################### +### REST Service Configuration +#################### + +# Is the REST service enabled +# effectiveMode: restart +# Datatype: boolean +enable_rest_service=false + +# the binding port of the REST service +# effectiveMode: restart +# Datatype: int +rest_service_port=18080 + +# Whether to display rest service interface information through swagger. eg: http://ip:port/swagger.json +# effectiveMode: restart +# Datatype: boolean +enable_swagger=false + +# the default row limit to a REST query response when the rowSize parameter is not given in request +# effectiveMode: restart +# Datatype: int +rest_query_default_row_size_limit=10000 + +# the expiration time of the user login information cache (in seconds) +# effectiveMode: restart +# Datatype: int +cache_expire_in_seconds=28800 + +# maximum number of users can be stored in the user login cache. +# effectiveMode: restart +# Datatype: int +cache_max_num=100 + +# init capacity of users can be stored in the user login cache. +# effectiveMode: restart +# Datatype: int +cache_init_num=10 + +# Is client authentication required +# effectiveMode: restart +# Datatype: boolean +client_auth=false + +# SSL trust store path +# effectiveMode: restart +trust_store_path="" + +# SSL trust store password. 
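+# (Illustrative only: with client_auth=true, trust_store_path would point at a
+# trust store containing the client CA certificate, e.g. /etc/iotdb/trust.p12;
+# that path is an assumption, not a default.)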
+# effectiveMode: restart +# Datatype: String +trust_store_pwd="" + +# SSL timeout (in seconds) +# effectiveMode: restart +# Datatype: int +idle_timeout_in_seconds=50000 + +#################### +### Load balancing configuration +#################### + +# All parameters in Partition configuration is unmodifiable after ConfigNode starts for the first time. +# And these parameters should be consistent within the ConfigNodeGroup. +# Number of SeriesPartitionSlots per Database +# effectiveMode: first_start +# Datatype: Integer +series_slot_num=1000 + +# SeriesPartitionSlot executor class +# These hashing algorithms are currently supported: +# 1. BKDRHashExecutor(Default) +# 2. APHashExecutor +# 3. JSHashExecutor +# 4. SDBMHashExecutor +# Also, if you want to implement your own SeriesPartition executor, you can inherit the SeriesPartitionExecutor class and +# modify this parameter to correspond to your Java class +# effectiveMode: first_start +# Datatype: String +series_partition_executor_class=org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor + +# The policy of extension SchemaRegionGroup for each Database. +# These policies are currently supported: +# 1. CUSTOM(Each Database will allocate schema_region_group_per_database RegionGroups as soon as created) +# 2. AUTO(Each Database will automatically extend SchemaRegionGroups based on the data it has) +# effectiveMode: restart +# Datatype: String +schema_region_group_extension_policy=AUTO + +# When set schema_region_group_extension_policy=CUSTOM, +# this parameter is the default number of SchemaRegionGroups for each Database. +# When set schema_region_group_extension_policy=AUTO, +# this parameter is the default minimal number of SchemaRegionGroups for each Database. +# effectiveMode: restart +# Datatype: Integer +default_schema_region_group_num_per_database=1 + +# Only take effect when set schema_region_group_extension_policy=AUTO. +# This parameter is the maximum number of SchemaRegions expected to be managed by each DataNode. +# Notice: Since each Database requires at least one SchemaRegionGroup to manage its schema, +# this parameter doesn't limit the upper bound of cluster SchemaRegions when there are too many Databases. +# effectiveMode: restart +# Datatype: Double +schema_region_per_data_node=1.0 + +# The policy of extension DataRegionGroup for each Database. +# These policies are currently supported: +# 1. CUSTOM(Each Database will allocate data_region_group_per_database DataRegionGroups as soon as created) +# 2. AUTO(Each Database will automatically extend DataRegionGroups based on the data it has) +# effectiveMode: restart +# Datatype: String +data_region_group_extension_policy=AUTO + +# When set data_region_group_extension_policy=CUSTOM, +# this parameter is the default number of DataRegionGroups for each Database. +# When set data_region_group_extension_policy=AUTO, +# this parameter is the default minimal number of DataRegionGroups for each Database. +# effectiveMode: restart +# Datatype: Integer +default_data_region_group_num_per_database=2 + +# Only take effect when set data_region_group_extension_policy=AUTO. +# This parameter is the maximum number of DataRegions expected to be managed by each DataNode. +# Notice: Since each Database requires at least two DataRegionGroups to manage its data, +# this parameter doesn't limit the upper bound of cluster DataRegions when there are too many Databases. 
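+# (Hedged example of the CUSTOM policy described above, with values assumed
+# for a small cluster: data_region_group_extension_policy=CUSTOM together with
+# default_data_region_group_num_per_database=2.)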
+# effectiveMode: restart
+# Datatype: Double
+data_region_per_data_node=5.0
+
+# Whether to enable auto leader balance for the Ratis consensus protocol.
+# The ConfigNode-leader will balance the leaders of Ratis-RegionGroups by leader_distribution_policy if set to true.
+# Notice: The default is false because Ratis is unstable for this function.
+# effectiveMode: restart
+# Datatype: Boolean
+enable_auto_leader_balance_for_ratis_consensus=true
+
+# Whether to enable auto leader balance for the IoTConsensus protocol.
+# The ConfigNode-leader will balance the leaders of IoTConsensus-RegionGroups by leader_distribution_policy if set to true.
+# Notice: The default is true because IoTConsensus depends on this function to distribute leaders.
+# effectiveMode: restart
+# Datatype: Boolean
+enable_auto_leader_balance_for_iot_consensus=true
+
+####################
+### Cluster management
+####################
+
+# Time partition origin in milliseconds; the default is zero.
+# The origin defaults to the beginning of Unix time, January 1, 1970, at 00:00 UTC (Coordinated Universal Time).
+# This point is known as the Unix epoch, and its timestamp is 0.
+# To specify a different time partition origin, set this value to a specific Unix timestamp in milliseconds.
+# effectiveMode: first_start
+# Datatype: long
+time_partition_origin=0
+
+# Time partition interval in milliseconds, used for partitioning data inside each data region; the default equals one week.
+# effectiveMode: first_start
+# Datatype: long
+time_partition_interval=604800000
+
+# The heartbeat interval in milliseconds; the default is 1000 ms.
+# effectiveMode: restart
+# Datatype: long
+heartbeat_interval_in_ms=1000
+
+# Remaining disk space threshold at which the DataNode is set to ReadOnly status.
+# effectiveMode: restart
+# Datatype: double(percentage)
+disk_space_warning_threshold=0.05
+
+####################
+### Memory Control Configuration
+####################
+
+# Memory Allocation Ratio: StorageEngine, QueryEngine, SchemaEngine, Consensus, StreamingEngine and Free Memory.
+# The parameter form is a:b:c:d:e:f, where a, b, c, d, e and f are integers, for example: 1:1:1:1:1:1 , 6:2:1:1:1:1
+# If you have a high write load and a low read load, adjust it to, for example, 6:1:1:1:1:1
+# effectiveMode: restart
+datanode_memory_proportion=3:3:1:1:1:1
+
+# Schema Memory Allocation Ratio: SchemaRegion, SchemaCache, and PartitionCache.
+# The parameter form is a:b:c, where a, b and c are integers, for example: 1:1:1 , 6:2:1
+# effectiveMode: restart
+schema_memory_proportion=5:4:1
+
+# Memory allocation ratio in StorageEngine: Write, Compaction
+# The parameter form is a:b, where a and b are integers,
for example: 8:2 , 7:3 +# effectiveMode: restart +storage_engine_memory_proportion=8:2 + +# Memory allocation ratio in writing: Memtable, TimePartitionInfo +# Memtable is the total memory size of all memtables +# TimePartitionInfo is the total memory size of last flush time of all data regions +# effectiveMode: restart +write_memory_proportion=19:1 + +# primitive array size (length of each array) in array pool +# effectiveMode: restart +# Datatype: int +primitive_array_size=64 + +# Ratio of compaction memory for chunk metadata maintains in memory when doing compaction +# effectiveMode: restart +# Datatype: double +chunk_metadata_size_proportion=0.1 + +# Ratio of write memory for invoking flush disk, 0.4 by default +# If you have extremely high write load (like batch=1000), it can be set lower than the default value like 0.2 +# effectiveMode: restart +# Datatype: double +flush_proportion=0.4 + +# Ratio of write memory allocated for buffered arrays, 0.6 by default +# effectiveMode: restart +# Datatype: double +buffered_arrays_memory_proportion=0.6 + +# Ratio of write memory for rejecting insertion, 0.8 by default +# If you have extremely high write load (like batch=1000) and the physical memory size is large enough, +# it can be set higher than the default value like 0.9 +# effectiveMode: restart +# Datatype: double +reject_proportion=0.8 + +# Ratio of memory for the DevicePathCache. DevicePathCache is the deviceId cache, keep only one copy of the same deviceId in memory +# effectiveMode: restart +# Datatype: double +device_path_cache_proportion=0.05 + +# If memory cost of data region increased more than proportion of allocated memory for write, report to system. The default value is 0.001 +# effectiveMode: restart +# Datatype: double +write_memory_variation_report_proportion=0.001 + +# When an inserting is rejected, waiting period (in ms) to check system again, 50 by default. +# If the insertion has been rejected and the read load is low, it can be set larger. +# effectiveMode: restart +# Datatype: int +check_period_when_insert_blocked=50 + +# size of ioTaskQueue. The default value is 10 +# effectiveMode: restart +# Datatype: int +io_task_queue_size_for_flushing=10 + +# If true, we will estimate each query's possible memory footprint before executing it and deny it if its estimated memory exceeds current free memory +# effectiveMode: hot_reload +# Datatype: bool +enable_query_memory_estimation=true + +#################### +### Schema Engine Configuration +#################### + +# The schema management mode of schema engine. Currently, support Memory and PBTree. +# This config of all DataNodes in one cluster must keep same. +# effectiveMode: first_start +# Datatype: string +schema_engine_mode=Memory + +# cache size for partition. +# This cache is used to improve partition fetch from config node. +# effectiveMode: restart +# Datatype: int +partition_cache_size=1000 + +# The cycle when metadata log is periodically forced to be written to disk(in milliseconds) +# If sync_mlog_period_in_ms=0 it means force metadata log to be written to disk after each refreshment +# Set this parameter to 0 may slow down the operation on slow disk. 
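+# (Worked example with assumed numbers: if the DataNode is given 10 GB and
+# datanode_memory_proportion=3:3:1:1:1:1 (ten shares in total), StorageEngine
+# and QueryEngine each receive 3 GB; storage_engine_memory_proportion=8:2 then
+# gives writes 2.4 GB and compaction 0.6 GB of that share.)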
+# effectiveMode: restart +# Datatype: int +sync_mlog_period_in_ms=100 + +# interval num for tag and attribute records when force flushing to disk +# When a certain amount of tag and attribute records is reached, they will be force flushed to disk +# It is possible to lose at most tag_attribute_flush_interval records +# effectiveMode: first_start +# Datatype: int +tag_attribute_flush_interval=1000 + +# max size for a storage block for tags and attributes of one time series. If the combined size of tags and +# attributes exceeds the tag_attribute_total_size, a new storage block will be allocated to continue storing +# the excess data. +# the unit is byte +# effectiveMode: first_start +# Datatype: int +tag_attribute_total_size=700 + +# max measurement num of internal request +# When creating timeseries with Session.createMultiTimeseries, the user input plan, the timeseries num of +# which exceeds this num, will be split to several plans with timeseries no more than this num. +# effectiveMode: restart +# Datatype: int +max_measurement_num_of_internal_request=10000 + +# Policy of DataNodeSchemaCache eviction. +# Support FIFO and LRU policy. FIFO takes low cache update overhead. LRU takes high cache hit rate. +# effectiveMode: restart +# Datatype: int +datanode_schema_cache_eviction_policy=FIFO + +# This configuration parameter sets the maximum number of time series allowed in the cluster. +# The value should be a positive integer representing the desired threshold. +# When the threshold is reached, users will be prohibited from creating new time series. +# -1 means unlimited +# effectiveMode: restart +# Datatype: int +cluster_timeseries_limit_threshold=-1 + +# This configuration parameter sets the maximum number of device allowed in the cluster. +# The value should be a positive integer representing the desired threshold. +# When the threshold is reached, users will be prohibited from creating new time series. +# -1 means unlimited +# effectiveMode: restart +# Datatype: int +cluster_device_limit_threshold=-1 + +# This configuration parameter sets the maximum number of Cluster Databases allowed. +# The value should be a positive integer representing the desired threshold. +# When the threshold is reached, users will be prohibited from creating new databases. +# -1 means unlimited. +# effectiveMode: restart +# Datatype: int +database_limit_threshold = -1 + + + +#################### +### Configurations for creating schema automatically +#################### + +# Whether creating schema automatically is enabled +# If true, then create database and timeseries automatically when not exists in insertion +# Or else, user need to create database and timeseries before insertion. +# effectiveMode: hot_reload +# Datatype: boolean +enable_auto_create_schema=true + +# Database level when creating schema automatically is enabled +# e.g. 
root.sg0.d1.s2 +# we will set root.sg0 as the database if database level is 1 +# effectiveMode: hot_reload +# Datatype: int +default_storage_group_level=1 + +# ALL data types: BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT + +# register time series as which type when receiving boolean string "true" or "false" +# effectiveMode: hot_reload +# Datatype: TSDataType +# Options: BOOLEAN, TEXT +boolean_string_infer_type=BOOLEAN + +# register time series as which type when receiving an integer string and using float or double may lose precision +# effectiveMode: hot_reload +# Datatype: TSDataType +# Options: DOUBLE, FLOAT, INT32, INT64, TEXT +integer_string_infer_type=DOUBLE + +# register time series as which type when receiving a floating number string "6.7" +# effectiveMode: hot_reload +# Datatype: TSDataType +# Options: DOUBLE, FLOAT, TEXT +floating_string_infer_type=DOUBLE + +# register time series as which type when receiving the Literal NaN. +# effectiveMode: hot_reload +# Datatype: TSDataType +# Options: DOUBLE, FLOAT, TEXT +nan_string_infer_type=DOUBLE + +# BOOLEAN encoding when creating schema automatically is enabled +# effectiveMode: hot_reload +# Datatype: TSEncoding +default_boolean_encoding=RLE + +# INT32 encoding when creating schema automatically is enabled +# effectiveMode: hot_reload +# Datatype: TSEncoding +default_int32_encoding=TS_2DIFF + +# INT64 encoding when creating schema automatically is enabled +# effectiveMode: hot_reload +# Datatype: TSEncoding +default_int64_encoding=TS_2DIFF + +# FLOAT encoding when creating schema automatically is enabled +# effectiveMode: hot_reload +# Datatype: TSEncoding +default_float_encoding=GORILLA + +# DOUBLE encoding when creating schema automatically is enabled +# effectiveMode: hot_reload +# Datatype: TSEncoding +default_double_encoding=GORILLA + +# TEXT encoding when creating schema automatically is enabled +# effectiveMode: hot_reload +# Datatype: TSEncoding +default_text_encoding=PLAIN + +#################### +### Query Configurations +#################### + +# The read consistency level +# These consistency levels are currently supported: +# 1. strong(Default, read from the leader replica) +# 2. weak(Read from a random replica) +# effectiveMode: restart +# Datatype: string +read_consistency_level=strong + +# Whether to cache meta data(BloomFilter, ChunkMetadata and TimeSeriesMetadata) or not. +# effectiveMode: restart +# Datatype: boolean +meta_data_cache_enable=true + +# Read memory Allocation Ratio: BloomFilterCache : ChunkCache : TimeSeriesMetadataCache : Coordinator : Operators : DataExchange : timeIndex in TsFileResourceList : others. +# The parameter form is a:b:c:d:e:f:g:h, where a, b, c, d, e, f, g and h are integers. 
for example: 1:1:1:1:1:1:1:1 , 1:100:200:50:200:200:200:50 +# effectiveMode: restart +chunk_timeseriesmeta_free_memory_proportion=1:100:200:50:200:200:200:50 + +# Whether to enable LAST cache +# effectiveMode: restart +# Datatype: boolean +enable_last_cache=true + +# Core size of ThreadPool of MPP data exchange +# effectiveMode: restart +# Datatype: int +mpp_data_exchange_core_pool_size=10 + +# Max size of ThreadPool of MPP data exchange +# effectiveMode: restart +# Datatype: int +mpp_data_exchange_max_pool_size=10 + +# Max waiting time for MPP data exchange +# effectiveMode: restart +# Datatype: int +mpp_data_exchange_keep_alive_time_in_ms=1000 + +# The max execution time of a DriverTask +# effectiveMode: restart +# Datatype: int, Unit: ms +driver_task_execution_time_slice_in_ms=200 + +# The max capacity of a TsBlock +# effectiveMode: hot_reload +# Datatype: int, Unit: byte +max_tsblock_size_in_bytes=131072 + +# The max number of lines in a single TsBlock +# effectiveMode: hot_reload +# Datatype: int +max_tsblock_line_number=1000 + +# Time cost(ms) threshold for slow query +# effectiveMode: hot_reload +# Datatype: long +slow_query_threshold=10000 + +# The max executing time of query. unit: ms +# effectiveMode: restart +# Datatype: int +query_timeout_threshold=60000 + +# The maximum allowed concurrently executing queries +# effectiveMode: restart +# Datatype: int +max_allowed_concurrent_queries=1000 + +# How many threads can concurrently execute query statement. When <= 0, use CPU core number. +# effectiveMode: restart +# Datatype: int +query_thread_count=0 + +# How many pipeline drivers will be created for one fragment instance. When <= 0, use CPU core number / 2. +# effectiveMode: restart +# Datatype: int +degree_of_query_parallelism=0 + +# The threshold of count map size when calculating the MODE aggregation function +# effectiveMode: restart +# Datatype: int +mode_map_size_threshold=10000 + +# The amount of data iterate each time in server (the number of data strips, that is, the number of different timestamps.) +# effectiveMode: restart +# Datatype: int +batch_size=100000 + +# The memory for external sort in sort operator, when the data size is smaller than sort_buffer_size_in_bytes, the sort operator will use in-memory sort. +# effectiveMode: restart +# Datatype: long +sort_buffer_size_in_bytes=1048576 + +# The threshold of operator count in the result set of EXPLAIN ANALYZE, if the number of operator in the result set is larger than this threshold, operator will be merged. +# effectiveMode: hot_reload +# Datatype: int +merge_threshold_of_explain_analyze=10 + +#################### +### TTL Configuration +#################### + +# The interval of TTL check task in each database. The TTL check task will inspect and select files with a higher volume of expired data for compaction. Default is 2 hours. +# Notice: It is not recommended to change it too small, as it will affect the read and write performance of the system. +# effectiveMode: restart +# Unit: ms +# Datatype: int +ttl_check_interval=7200000 + +# The maximum expiring time of device which has a ttl. Default is 1 month. +# If the data elapsed time (current timestamp minus the maximum data timestamp of the device in the file) of such devices exceeds this value, then the file will be cleaned by compaction. +# Notice: It is not recommended to change it too small, as it will affect the read and write performance of the system. 
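+# (For reference: ttl_check_interval=7200000 ms is 2 hours, i.e. 1000*60*60*2,
+# and the max_expired_time below, 2592000000 ms, is 30 days.)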
+# effectiveMode: restart +# Unit: ms +# Datatype: int +max_expired_time=2592000000 + +# The expired device ratio. If the ratio of expired devices in one file exceeds this value, then expired data of this file will be cleaned by compaction. +# effectiveMode: restart +# Datatype: float +expired_data_ratio=0.3 + +#################### +### Storage Engine Configuration +#################### + +# Use this value to set timestamp precision as "ms", "us" or "ns". +# Once the precision has been set, it can not be changed. +# effectiveMode: first_start +# Datatype: String +timestamp_precision=ms + +# When the timestamp precision check is enabled, the timestamps those are over 13 digits for ms precision, or over 16 digits for us precision are not allowed to be inserted. +# For all precisions, ms, us and ns, the timestamps cannot exceed the range of [-9223372036854775808, 9223372036854775807], regardless of whether the check is enabled or not. +# effectiveMode: first_start +# Datatype: Boolean +timestamp_precision_check_enabled=true + +# When the waiting time (in ms) of an inserting exceeds this, throw an exception. 10000 by default. +# If the insertion has been rejected and the read load is low, it can be set larger +# effectiveMode: restart +# Datatype: int +max_waiting_time_when_insert_blocked=10000 + +# Add a switch to enable separate sequence and unsequence data. +# If it is true, then data will be separated into seq and unseq data dir. If it is false, then all data will be written into unseq data dir. +# effectiveMode: restart +# Datatype: boolean +enable_separate_data=true + +# What will the system do when unrecoverable error occurs. +# Datatype: String +# Optional strategies are as follows: +# 1. CHANGE_TO_READ_ONLY: set system status to read-only and the system only accepts query operations. +# 2. SHUTDOWN: the system will be shutdown. +# effectiveMode: restart +handle_system_error=CHANGE_TO_READ_ONLY + +# Whether to timed flush sequence tsfiles' memtables. +# effectiveMode: hot_reload +# Datatype: boolean +enable_timed_flush_seq_memtable=true + +# If a memTable's last update time is older than current time minus this, the memtable will be flushed to disk. +# Only check sequence tsfiles' memtables. +# The default flush interval is 10 * 60 * 1000. (unit: ms) +# effectiveMode: hot_reload +# Datatype: long +seq_memtable_flush_interval_in_ms=600000 + +# The interval to check whether sequence memtables need flushing. +# The default flush check interval is 30 * 1000. (unit: ms) +# effectiveMode: hot_reload +# Datatype: long +seq_memtable_flush_check_interval_in_ms=30000 + +# Whether to timed flush unsequence tsfiles' memtables. +# effectiveMode: hot_reload +# Datatype: boolean +enable_timed_flush_unseq_memtable=true + +# If a memTable's last update time is older than current time minus this, the memtable will be flushed to disk. +# Only check unsequence tsfiles' memtables. +# The default flush interval is 10 * 60 * 1000. (unit: ms) +# effectiveMode: hot_reload +# Datatype: long +unseq_memtable_flush_interval_in_ms=600000 + +# The interval to check whether unsequence memtables need flushing. +# The default flush check interval is 30 * 1000. 
(unit: ms) +# effectiveMode: hot_reload +# Datatype: long +unseq_memtable_flush_check_interval_in_ms=30000 + +# The sort algorithms used in the memtable's TVList +# TIM: default tim sort, +# QUICK: quick sort, +# BACKWARD: backward sort +# effectiveMode: restart +tvlist_sort_algorithm=TIM + +# When the average point number of timeseries in memtable exceeds this, the memtable is flushed to disk. The default threshold is 100000. +# effectiveMode: restart +# Datatype: int +avg_series_point_number_threshold=100000 + +# How many threads can concurrently flush. When <= 0, use CPU core number. +# effectiveMode: restart +# Datatype: int +flush_thread_count=0 + +# In one insert (one device, one timestamp, multiple measurements), +# if enable partial insert, one measurement failure will not impact other measurements +# effectiveMode: restart +# Datatype: boolean +enable_partial_insert=true + +# the interval to log recover progress of each vsg when starting iotdb +# effectiveMode: restart +# Datatype: int +recovery_log_interval_in_ms=5000 + +# If using v0.13 client to insert data, please set this configuration to true. +# Notice: if using v0.13/v1.0 client or setting Client Version to V_0_13 manually, enable this config will disable insert redirection. +# effectiveMode: restart +# Datatype: boolean +0.13_data_insert_adapt=false + +# Verify that TSfiles generated by Flush, Load, and Compaction are correct. The following is verified: +# 1. Check whether the file contains a header and a tail +# 2. Check whether files can be deserialized successfully +# 3. Check whether the file contains data +# 4. Whether there is time range overlap between data, whether it is increased, and whether the metadata index offset of the sequence is correct +# effectiveMode: hot_reload +# Datatype: boolean +enable_tsfile_validation=false + +# Default tier TTL. When the survival time of the data exceeds the threshold, it will be migrated to the next tier. +# Negative value means the tier TTL is unlimited. 
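+# (Illustrative, assuming a multi-tier setup: tier_ttl_in_ms=86400000, i.e.
+# 1000*60*60*24, would migrate data older than one day to the next tier.)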
+# effectiveMode: restart +# Datatype: long +# Unit: ms +tier_ttl_in_ms=-1 + +#################### +### Compaction Configurations +#################### +# sequence space compaction: only compact the sequence files +# effectiveMode: hot_reload +# Datatype: boolean +enable_seq_space_compaction=true + +# unsequence space compaction: only compact the unsequence files +# effectiveMode: hot_reload +# Datatype: boolean +enable_unseq_space_compaction=true + +# cross space compaction: compact the unsequence files into the overlapped sequence files +# effectiveMode: hot_reload +# Datatype: boolean +enable_cross_space_compaction=true + +# enable auto repair unsorted file by compaction +# effectiveMode: hot_reload +# Datatype: boolean +enable_auto_repair_compaction=true + +# the selector of cross space compaction task +# effectiveMode: restart +# Options: rewrite +cross_selector=rewrite + +# the compaction performer of cross space compaction task +# effectiveMode: restart +# Options: read_point, fast +cross_performer=fast + +# the selector of inner sequence space compaction task +# effectiveMode: hot_reload +# Options: size_tiered_single_target,size_tiered_multi_target +inner_seq_selector=size_tiered_multi_target + +# the performer of inner sequence space compaction task +# effectiveMode: restart +# Options: read_chunk, fast +inner_seq_performer=read_chunk + +# the selector of inner unsequence space compaction task +# effectiveMode: hot_reload +# Options: size_tiered_single_target,size_tiered_multi_target +inner_unseq_selector=size_tiered_multi_target + +# the performer of inner unsequence space compaction task +# effectiveMode: restart +# Options: read_point, fast +inner_unseq_performer=fast + +# The priority of compaction execution +# INNER_CROSS: prioritize inner space compaction, reduce the number of files first +# CROSS_INNER: prioritize cross space compaction, eliminate the unsequence files first +# BALANCE: alternate two compaction types +# effectiveMode: restart +compaction_priority=INNER_CROSS + +# The size of candidate compaction task queue. +# effectiveMode: restart +# Datatype: int +candidate_compaction_task_queue_size=50 + +# This parameter is used in two places: +# 1. The target tsfile size of inner space compaction. +# 2. The candidate size of seq tsfile in cross space compaction will be smaller than target_compaction_file_size * 1.5. +# In most cases, the target file size of cross compaction won't exceed this threshold, and if it does, it will not be much larger than it. +# default is 2GB +# effectiveMode: hot_reload +# Datatype: long, Unit: byte +target_compaction_file_size=2147483648 + +# The total file size limit in inner space compaction. +# default is 10GB +# effectiveMode: hot_reload +# Datatype: long, Unit: byte +inner_compaction_total_file_size_threshold=10737418240 + +# The total file num limit in inner space compaction. +# default is 100 +# effectiveMode: hot_reload +# Datatype: int +inner_compaction_total_file_num_threshold=100 + +# The max level gap in inner compaction selection, default is 2. +# effectiveMode: hot_reload +# Datatype: int +max_level_gap_in_inner_compaction=2 + +# The target chunk size in compaction and when memtable reaches this threshold, flush the memtable to disk. 
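+# (For reference: target_compaction_file_size=2147483648 bytes is 2 GiB
+# (2*1024^3), and inner_compaction_total_file_size_threshold=10737418240 bytes
+# is 10 GiB.)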
+# default is 1MB +# effectiveMode: restart +# Datatype: long, Unit: byte +target_chunk_size=1048576 + +# The target point nums in one chunk in compaction +# effectiveMode: restart +# Datatype: long +target_chunk_point_num=100000 + +# If the chunk size is lower than this threshold, it will be deserialized into points, default is 128 byte +# effectiveMode: restart +# Datatype: long, Unit:byte +chunk_size_lower_bound_in_compaction=128 + +# If the chunk point num is lower than this threshold, it will be deserialized into points +# effectiveMode: restart +# Datatype: long +chunk_point_num_lower_bound_in_compaction=100 + +# The file num requirement when selecting inner space compaction candidate files +# effectiveMode: hot_reload +# Datatype: int +inner_compaction_candidate_file_num=30 + +# The max file when selecting cross space compaction candidate files +# At least one unseq file with it's overlapped seq files will be selected even exceeded this number +# effectiveMode: hot_reload +# Datatype: int +max_cross_compaction_candidate_file_num=500 + +# The max total size when selecting cross space compaction candidate files +# At least one unseq file with it's overlapped seq files will be selected even exceeded this number +# effectiveMode: hot_reload +# Datatype: long, Unit: byte +max_cross_compaction_candidate_file_size=5368709120 + +# The min inner compaction level of unsequence file which can be selected as candidate +# effectiveMode: hot_reload +# Datatype: int +min_cross_compaction_unseq_file_level=1 + +# How many threads will be set up to perform compaction, 10 by default. +# Set to 1 when less than or equal to 0. +# effectiveMode: hot_reload +# Datatype: int +compaction_thread_count=10 + +# How many chunk will be compacted in aligned series compaction, 10 by default. +# Set to Integer.MAX_VALUE when less than or equal to 0. +# effectiveMode: hot_reload +# Datatype: int +compaction_max_aligned_series_num_in_one_batch=10 + +# The interval of compaction task schedule +# effectiveMode: restart +# Datatype: long, Unit: ms +compaction_schedule_interval_in_ms=60000 + +# The limit of write throughput merge can reach per second +# values less than or equal to 0 means no limit +# effectiveMode: hot_reload +# Datatype: int, Unit: megabyte +compaction_write_throughput_mb_per_sec=16 + +# The limit of read throughput merge can reach per second +# values less than or equal to 0 means no limit +# effectiveMode: hot_reload +# Datatype: int, Unit: megabyte +compaction_read_throughput_mb_per_sec=0 + +# The limit of read operation merge can reach per second +# values less than or equal to 0 means no limit +# effectiveMode: hot_reload +# Datatype: int +compaction_read_operation_per_sec=0 + +# The number of sub compaction threads to be set up to perform compaction. +# Currently only works for nonAligned data in cross space compaction and unseq inner space compaction. +# Set to 1 when less than or equal to 0. +# effectiveMode: hot_reload +# Datatype: int +sub_compaction_thread_count=4 + +# Redundancy value of disk availability, only use for inner compaction. +# When disk availability is lower than the sum of (disk_space_warning_threshold + inner_compaction_task_selection_disk_redundancy), inner compaction tasks containing mods files are selected first. +# effectiveMode: hot_reload +# DataType: double +inner_compaction_task_selection_disk_redundancy=0.05 + +# Mods file size threshold, only use for inner compaction. 
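+# (Reading of the throttles above: compaction_write_throughput_mb_per_sec=16
+# caps total compaction writes at roughly 16 MB/s, while the read throughput
+# and read operation limits of 0 mean unlimited.)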
+# When the size of the mods file corresponding to TsFile exceeds this value, inner compaction tasks containing mods files are selected first. +# effectiveMode: hot_reload +# DataType: long +inner_compaction_task_selection_mods_file_threshold=131072 + +# The number of threads to be set up to select compaction task. +# effectiveMode: hot_reload +# Datatype: int +compaction_schedule_thread_num=4 + +#################### +### Write Ahead Log Configuration +#################### + +# Write mode of wal +# The details of these three modes are as follows: +# 1. DISABLE: the system will disable wal. +# 2. SYNC: the system will submit wal synchronously, write request will not return until its wal is fsynced to the disk successfully. +# 3. ASYNC: the system will submit wal asynchronously, write request will return immediately no matter its wal is fsynced to the disk successfully. +# The write performance order is DISABLE > ASYNC > SYNC, but only SYNC mode can ensure data durability. +# effectiveMode: restart +wal_mode=ASYNC + +# Max number of wal nodes, each node corresponds to one wal directory +# This parameter is only valid in the standalone mode. IoTConsensus uses one wal per data region and RatisConsensus doesn't use wal. +# The default value 0 means the number is determined by the system, the number is in the range of [data region num / 2, data region num]. +# Notice: this value affects write performance significantly. +# For non-SSD disks, values between one third and half of databases number are recommended. +# effectiveMode: restart +# Datatype: int +max_wal_nodes_num=0 + +# Duration a wal flush operation will wait before calling fsync in the async mode +# A duration greater than 0 batches multiple wal fsync calls into one. This is useful when disks are slow or WAL write contention exists. +# Notice: this value affects write performance significantly, values in the range of 10ms-2000ms are recommended. +# effective: hot_reload +# Datatype: long +wal_async_mode_fsync_delay_in_ms=1000 + +# Duration a wal flush operation will wait before calling fsync in the sync mode +# A duration greater than 0 batches multiple wal fsync calls into one. This is useful when disks are slow or WAL write contention exists. +# Notice: this value affects write performance significantly, values in the range of 0ms-10ms are recommended. +# effective: hot_reload +# Datatype: long +wal_sync_mode_fsync_delay_in_ms=3 + +# Buffer size of each wal node +# If it's a value smaller than 0, use the default value 32 * 1024 * 1024 bytes (32MB). +# effectiveMode: restart +# Datatype: int +wal_buffer_size_in_byte=33554432 + +# Blocking queue capacity of each wal buffer, restricts maximum number of WALEdits cached in the blocking queue. +# effectiveMode: restart +# Datatype: int +wal_buffer_queue_capacity=500 + +# Size threshold of each wal file +# When a wal file's size exceeds this, the wal file will be closed and a new wal file will be created. +# If it's a value smaller than 0, use the default value 30 * 1024 * 1024 (30MB). +# effectiveMode: hot_reload +# Datatype: long +wal_file_size_threshold_in_byte=31457280 + +# Minimum ratio of effective information in wal files +# This value should be between 0.0 and 1.0 +# If effective information ratio is below this value, MemTable snapshot or flush will be triggered. +# Increase this value when wal occupies too much disk space. But, if this parameter is too large, the write performance may decline. 
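+# (Illustrative durability-first override of the WAL settings above, not the
+# shipped defaults: wal_mode=SYNC with wal_sync_mode_fsync_delay_in_ms=0, i.e.
+# fsync on every write with no batching, at a significant write-throughput cost.)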
+# effectiveMode: hot_reload +# Datatype: double +wal_min_effective_info_ratio=0.1 + +# MemTable size threshold for triggering MemTable snapshot in wal +# When a memTable's size (in byte) exceeds this, wal can flush this memtable to disk, otherwise wal will snapshot this memtable in wal. +# If it's a value smaller than 0, use the default value 8 * 1024 * 1024 bytes (8MB). +# effectiveMode: hot_reload +# Datatype: long +wal_memtable_snapshot_threshold_in_byte=8388608 + +# MemTable's max snapshot number in wal +# If one memTable's snapshot number in wal exceeds this value, it will be flushed to disk. +# effectiveMode: hot_reload +# Datatype: int +max_wal_memtable_snapshot_num=1 + +# The period when outdated wal files are periodically deleted +# If this value is too large, outdated wal files may not able to be deleted in time. +# If it's a value smaller than 0, use the default value 20 * 1000 ms (20 seconds). +# effectiveMode: hot_reload +# Datatype: long +delete_wal_files_period_in_ms=20000 + +# The minimum size of wal files when throttle down in IoTConsensus +# If this value is not set, it will be carefully chosen according to the available disk space. +# If this value is set smaller than 0, it will default to 50 * 1024 * 1024 * 1024 bytes (50GB). +# effectiveMode: hot_reload +# Datatype: long +wal_throttle_threshold_in_byte=53687091200 + +# Maximum wait time of write cache in IoTConsensus +# If this value is less than or equal to 0, use the default value 10 * 1000 ms (10s) +# effectiveMode: hot_reload +# Datatype: long +iot_consensus_cache_window_time_in_ms=-1 + +# Enable Write Ahead Log compression. +# With this parameter enabled, IoTDB can save a lot of IO resources at the cost of a small amount +# of additional CPU resources, which is generally suitable for the scenario +# where CPU is not the bottleneck but IO is the bottleneck. 
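+# (For reference: wal_throttle_threshold_in_byte=53687091200 is 50 GiB, i.e.
+# 50*1024^3 bytes.)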
+# effectiveMode: hot_reload +# Datatype: boolean +enable_wal_compression=true + +#################### +### IoTConsensus Configuration +#################### + +# The maximum log entries num in IoTConsensus Batch +# effectiveMode: hot_reload +# Datatype: int +data_region_iot_max_log_entries_num_per_batch = 1024 + +# The maximum size in IoTConsensus Batch +# effectiveMode: hot_reload +# Datatype: int +data_region_iot_max_size_per_batch = 16777216 + +# The maximum pending batches num in IoTConsensus +# effectiveMode: hot_reload +# Datatype: int +data_region_iot_max_pending_batches_num = 5 + +# The maximum memory ratio for queue in IoTConsensus +# effectiveMode: hot_reload +# Datatype: double +data_region_iot_max_memory_ratio_for_queue = 0.6 + +# The maximum transit size in byte per second for region migration +# values less than or equal to 0 means no limit +# effectiveMode: hot_reload +# Datatype: long +region_migration_speed_limit_bytes_per_second = 33554432 + +#################### +### TsFile Configurations +#################### + +# The maximum number of bytes written to disk each time the data in memory is written to disk +# effectiveMode: hot_reload +# Datatype: int +group_size_in_byte=134217728 + +# The memory size for each series writer to pack page, default value is 64KB +# effectiveMode: hot_reload +# Datatype: int +page_size_in_byte=65536 + +# The maximum number of data points in a page, default 10000 +# effectiveMode: hot_reload +# Datatype: int +max_number_of_points_in_page=10000 + +# The threshold for pattern matching in regex +# effectiveMode: restart +# Datatype: int +pattern_matching_threshold=1000000 + +# Floating-point precision of query results. +# Only effective for RLE and TS_2DIFF encodings. +# Due to the limitation of machine precision, some values may not be interpreted strictly. +# E.g.: 1.111111 with precision=3 will be shown as 1.11100001 +# effectiveMode: hot_reload +# Datatype: int +float_precision=2 + +# Encoder of value series. default value is PLAIN. +# For int, long data type, also supports TS_2DIFF and RLE(run-length encoding), GORILLA and ZIGZAG. +# effectiveMode: hot_reload +value_encoder=PLAIN + +# Compression configuration +# Data compression method, supports UNCOMPRESSED, SNAPPY, ZSTD, LZMA2 or LZ4. Default value is LZ4 +# And it is also used as the default compressor of time column in aligned timeseries. +# effectiveMode: hot_reload +compressor=LZ4 + +#################### +### Authorization Configuration +#################### + +# which class to serve for authorization. By default, it is LocalFileAuthorizer. +# Another choice is org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer +# effectiveMode: restart +authorizer_provider_class=org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer + +# If OpenIdAuthorizer is enabled, then openID_url must be set. +# effectiveMode: restart +openID_url= + +# encryption provider class +# effectiveMode: first_start +iotdb_server_encrypt_decrypt_provider=org.apache.iotdb.commons.security.encrypt.MessageDigestEncrypt + +# encryption provided class parameter +# effectiveMode: first_start +iotdb_server_encrypt_decrypt_provider_parameter= + +# Cache size of user and role +# effectiveMode: restart +# Datatype: int +author_cache_size=1000 + +# Cache expire time of user and role +# effectiveMode: restart +# Datatype: int +author_cache_expire_time=30 + +#################### +### UDF Configuration +#################### + +# Used to estimate the memory usage of text fields in a UDF query. 
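+# (Illustrative OpenID setup for the authorization settings above; the URL is
+# an assumption: authorizer_provider_class=org.apache.iotdb.commons.auth.authorizer.OpenIdAuthorizer
+# with openID_url=https://sso.example.org/realms/iotdb .)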
+# It is recommended to set this value to be slightly larger than the average length of all text +# records. +# effectiveMode: restart +# Datatype: int +udf_initial_byte_array_length_for_memory_control=48 + +# How much memory may be used in ONE UDF query (in MB). +# The upper limit is 20% of allocated memory for read. +# effectiveMode: restart +# Datatype: float +udf_memory_budget_in_mb=30.0 + +# UDF memory allocation ratio. +# The parameter form is a:b:c, where a, b, and c are integers. +# effectiveMode: restart +udf_reader_transformer_collector_memory_proportion=1:1:1 + +# UDF lib dir +# If this property is unset, system will save the data in the default relative path directory under +# the UDF folder(i.e., %CONFIGNODE_HOME%/ext/udf). +# +# If it is absolute, system will save the data in exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the +# UDF folder. +# Note: If data_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative +# path. +# effectiveMode: restart +# For Windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is +# absolute. Otherwise, it is relative. +# udf_lib_dir=ext\\udf +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +udf_lib_dir=ext/udf + +#################### +### Trigger Configuration +#################### + +# Uncomment the following field to configure the trigger lib directory. +# effectiveMode: restart +# For Windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is +# absolute. Otherwise, it is relative. +# trigger_lib_dir=ext\\trigger +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +trigger_lib_dir=ext/trigger + +# How many times will we retry to found an instance of stateful trigger on DataNodes +# 3 by default. +# effectiveMode: restart +# Datatype: int +stateful_trigger_retry_num_when_not_found=3 + + +#################### +### Select-Into Configuration +#################### + +# The maximum memory occupied by the data to be written when executing select-into statements. +# effectiveMode: hot_reload +# Datatype: long +into_operation_buffer_size_in_byte=104857600 + +# The maximum number of rows can be processed in insert-tablet-plan when executing select-into statements. +# When <= 0, use 10000. +# effectiveMode: hot_reload +# Datatype: int +select_into_insert_tablet_plan_row_limit=10000 + +# The number of threads in the thread pool that execute insert-tablet tasks +# effectiveMode: restart +# Datatype: int +into_operation_execution_thread_count=2 + +#################### +### Continuous Query Configuration +#################### + +# The number of threads in the scheduled thread pool that submit continuous query tasks periodically +# effectiveMode: restart +# Datatype: int +continuous_query_submit_thread_count=2 + +# The minimum value of the continuous query execution time interval +# effectiveMode: restart +# Datatype: long(duration) +continuous_query_min_every_interval_in_ms=1000 + +#################### +### Pipe Configuration +#################### + +# Uncomment the following field to configure the pipe lib directory. +# effectiveMode: first_start +# For Windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is +# absolute. Otherwise, it is relative. 
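+# (Worked example for the UDF budget above, with assumed numbers: if 10 GB
+# (10240 MB) is allocated for reads, the effective per-query UDF budget is
+# min(udf_memory_budget_in_mb, 0.2*10240 MB) = 30 MB with the default above.)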
+# pipe_lib_dir=ext\\pipe +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +pipe_lib_dir=ext/pipe + +# The maximum number of threads that can be used to execute the pipe subtasks in PipeSubtaskExecutor. +# The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)). +# effectiveMode: restart +# Datatype: int +pipe_subtask_executor_max_thread_num=5 + +# The connection timeout (in milliseconds) for the thrift client. +# effectiveMode: restart +# Datatype: int +pipe_sink_timeout_ms=900000 + +# The maximum number of selectors that can be used in the sink. +# Recommend to set this value to less than or equal to pipe_sink_max_client_number. +# effectiveMode: restart +# Datatype: int +pipe_sink_selector_number=4 + +# The maximum number of clients that can be used in the sink. +# effectiveMode: restart +# Datatype: int +pipe_sink_max_client_number=16 + +# Whether to enable receiving pipe data through air gap. +# The receiver can only return 0 or 1 in tcp mode to indicate whether the data is received successfully. +# effectiveMode: restart +# Datatype: Boolean +pipe_air_gap_receiver_enabled=false + +# The port for the server to receive pipe data through air gap. +# Datatype: int +# effectiveMode: restart +pipe_air_gap_receiver_port=9780 + +# The total bytes that all pipe sinks can transfer per second. +# When given a value less than or equal to 0, it means no limit. +# default value is -1, which means no limit. +# effectiveMode: hot_reload +# Datatype: double +pipe_all_sinks_rate_limit_bytes_per_second=-1 + +#################### +### RatisConsensus Configuration +#################### + +# max payload size for a single log-sync-RPC from leader to follower(in byte, by default 16MB) +# effectiveMode: restart +# Datatype: int +config_node_ratis_log_appender_buffer_size_max=16777216 +schema_region_ratis_log_appender_buffer_size_max=16777216 +data_region_ratis_log_appender_buffer_size_max=16777216 + +# trigger a snapshot when snapshot_trigger_threshold logs are written +# effectiveMode: restart +# Datatype: int +config_node_ratis_snapshot_trigger_threshold=400000 +schema_region_ratis_snapshot_trigger_threshold=400000 +data_region_ratis_snapshot_trigger_threshold=400000 + +# allow flushing Raft Log asynchronously +# effectiveMode: restart +# Datatype: Boolean +config_node_ratis_log_unsafe_flush_enable=false +schema_region_ratis_log_unsafe_flush_enable=false +data_region_ratis_log_unsafe_flush_enable=false + +# max capacity of a single Log segment file (in byte, by default 24MB) +# effectiveMode: restart +# Datatype: int +config_node_ratis_log_segment_size_max_in_byte=25165824 +schema_region_ratis_log_segment_size_max_in_byte=25165824 +data_region_ratis_log_segment_size_max_in_byte=25165824 +config_node_simple_consensus_log_segment_size_max_in_byte=25165824 + +# flow control window for ratis grpc log appender +# effectiveMode: restart +# Datatype: int +config_node_ratis_grpc_flow_control_window=4194304 +schema_region_ratis_grpc_flow_control_window=4194304 +data_region_ratis_grpc_flow_control_window=4194304 +config_node_ratis_grpc_leader_outstanding_appends_max=128 +schema_region_ratis_grpc_leader_outstanding_appends_max=128 +data_region_ratis_grpc_leader_outstanding_appends_max=128 +config_node_ratis_log_force_sync_num=128 +schema_region_ratis_log_force_sync_num=128 +data_region_ratis_log_force_sync_num=128 + +# min election timeout for leader election +# effectiveMode: restart +# Datatype: int 
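+# (As in standard Raft, each election waits a randomized timeout between the
+# min and max values below, which reduces the chance of split votes; the exact
+# randomization policy is Ratis-defined and not specified in this file.)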
+config_node_ratis_rpc_leader_election_timeout_min_ms=2000 +schema_region_ratis_rpc_leader_election_timeout_min_ms=2000 +data_region_ratis_rpc_leader_election_timeout_min_ms=2000 + +# max election timeout for leader election +# effectiveMode: restart +# Datatype: int +config_node_ratis_rpc_leader_election_timeout_max_ms=4000 +schema_region_ratis_rpc_leader_election_timeout_max_ms=4000 +data_region_ratis_rpc_leader_election_timeout_max_ms=4000 + +# ratis client retry threshold +# effectiveMode: restart +# Datatype: int +config_node_ratis_request_timeout_ms=10000 +schema_region_ratis_request_timeout_ms=10000 +data_region_ratis_request_timeout_ms=10000 + +# currently we use exponential back-off retry policy for ratis +# effectiveMode: restart +# Datatype: int +config_node_ratis_max_retry_attempts=10 +config_node_ratis_initial_sleep_time_ms=100 +config_node_ratis_max_sleep_time_ms=10000 +schema_region_ratis_max_retry_attempts=10 +schema_region_ratis_initial_sleep_time_ms=100 +schema_region_ratis_max_sleep_time_ms=10000 +data_region_ratis_max_retry_attempts=10 +data_region_ratis_initial_sleep_time_ms=100 +data_region_ratis_max_sleep_time_ms=10000 + +# first election timeout +# effectiveMode: restart +# Datatype: int +ratis_first_election_timeout_min_ms=50 +ratis_first_election_timeout_max_ms=150 + +# preserve certain logs when take snapshot and purge +# effectiveMode: restart +# Datatype: int +config_node_ratis_preserve_logs_num_when_purge=1000 +schema_region_ratis_preserve_logs_num_when_purge=1000 +data_region_ratis_preserve_logs_num_when_purge=1000 + +# Raft Log disk size control +# effectiveMode: restart +# Datatype: int +config_node_ratis_log_max_size = 2147483648 +schema_region_ratis_log_max_size = 2147483648 +data_region_ratis_log_max_size = 21474836480 + +# Raft periodic snapshot interval, time unit is second +# effectiveMode: restart +# Datatype: int +config_node_ratis_periodic_snapshot_interval=86400 +schema_region_ratis_periodic_snapshot_interval=86400 +data_region_ratis_periodic_snapshot_interval=86400 + +#################### +### Fast IoTConsensus Configuration +#################### +# Default event buffer size for connector and receiver in pipe consensus +# effectiveMode: hot_reload +# DataType: int +fast_iot_consensus_pipeline_size=5 + +#################### +### Procedure Configuration +#################### + +# Default number of worker thread count +# effectiveMode: restart +# Datatype: int +procedure_core_worker_thread_count=4 + +# Default time interval of completed procedure cleaner work in, time unit is second +# effectiveMode: restart +# Datatype: int +procedure_completed_clean_interval=30 + +# Default ttl of completed procedure, time unit is second +# effectiveMode: restart +# Datatype: int +procedure_completed_evict_ttl=60 + +#################### +### MQTT Broker Configuration +#################### + +# whether to enable the mqtt service. +# effectiveMode: restart +# Datatype: boolean +enable_mqtt_service=false + +# the mqtt service binding host. +# effectiveMode: restart +# Datatype: String +mqtt_host=127.0.0.1 + +# the mqtt service binding port. +# effectiveMode: restart +# Datatype: int +mqtt_port=1883 + +# the handler pool size for handing the mqtt messages. +# effectiveMode: restart +# Datatype: int +mqtt_handler_pool_size=1 + +# the mqtt message payload formatter. 
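+# With the json formatter, a published payload is expected to look roughly like
+#   {"device":"root.sg.d1","timestamp":1586076045524,"measurements":["s1"],"values":[0.53]}
+# (illustrative example only; check the MQTT protocol section of your IoTDB
+# version's documentation for the exact schema)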
+# effectiveMode: restart +# Datatype: String +mqtt_payload_formatter=json + +# max length of mqtt message in byte +# effectiveMode: restart +# Datatype: int +mqtt_max_message_size=1048576 + +#################### +### IoTDB-AI Configuration +#################### + +# The thread count which can be used for model inference operation. +# effectiveMode: restart +# Datatype: int +model_inference_execution_thread_count=5 + +#################### +### Load TsFile Configuration +#################### + +# Load clean up task is used to clean up the unsuccessful loaded tsfile after a certain period of time. +# The parameter is the delay time after an unsuccessful load operation (in seconds). +# effectiveMode: hot_reload +# Datatype: int +load_clean_up_task_execution_delay_time_seconds=1800 + +# The maximum bytes per second of disk write throughput when loading tsfile. +# When given a value less than or equal to 0, it means no limit. +# Default value is -1, which means no limit. +# effectiveMode: hot_reload +# Datatype: int +load_write_throughput_bytes_per_second=-1 + +# Whether to enable the active listening mode for tsfile loading. +# effectiveMode: hot_reload +# Datatype: Boolean +load_active_listening_enable=true + +# The directory to be actively listened for tsfile loading. +# Multiple directories should be separated by a ','. +# The default directory is 'ext/load/pending'. +# effectiveMode: hot_reload +# Datatype: String +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. +# Otherwise, it is relative. +# load_active_listening_dirs=ext\\load\\pending +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +load_active_listening_dirs=ext/load/pending + +# The directory where tsfile are moved if the active listening mode fails to load them. +# Only one directory can be configured. +# effectiveMode: hot_reload +# Datatype: String +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. +# Otherwise, it is relative. +# load_active_listening_fail_dir=ext\\load\\failed +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +load_active_listening_fail_dir=ext/load/failed + +# The maximum number of threads that can be used to load tsfile actively. +# The default value, when this parameter is commented out or <= 0, use CPU core number. +# effectiveMode: restart +# Datatype: int +load_active_listening_max_thread_num=0 + +# The interval specified in seconds for the active listening mode to check the directory specified in load_active_listening_dirs. +# The active listening mode will check the directory every load_active_listening_check_interval_seconds seconds. +# effectiveMode: restart +# Datatype: int +load_active_listening_check_interval_seconds=5 + +#################### +### Dispatch Retry Configuration +#################### + +# The maximum retrying time for write request remotely dispatching, time unit is milliseconds. 
+# It only takes effect for remote dispatching of write requests, not for local dispatching or queries
+# Set it to 0 or a negative number to disable retrying of remotely dispatched write requests
+# The sleep time between retries doubles (100ms, 200ms, 400ms, 800ms and so on) until it reaches 20,000ms, after which it no longer increases
+# effectiveMode: hot_reload
+# Datatype: long
+write_request_remote_dispatch_max_retry_duration_in_ms=60000
+
+# Whether to retry on unknown errors.
+# Unknown errors currently include EXECUTE_STATEMENT_ERROR(301) and INTERNAL_SERVER_ERROR(305)
+# effectiveMode: hot_reload
+# Datatype: boolean
+enable_retry_for_unknown_error=false
diff --git a/docker/data/iotdb/conf/jmx.access b/docker/data/iotdb/conf/jmx.access
new file mode 100644
index 00000000..d6b57bc3
--- /dev/null
+++ b/docker/data/iotdb/conf/jmx.access
@@ -0,0 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# see https://docs.oracle.com/javase/8/docs/technotes/guides/management/agent.html#gdeup
+iotdb readonly
+root readwrite
diff --git a/docker/data/iotdb/conf/jmx.password b/docker/data/iotdb/conf/jmx.password
new file mode 100644
index 00000000..9055f019
--- /dev/null
+++ b/docker/data/iotdb/conf/jmx.password
@@ -0,0 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
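+#
+# NOTE: the entries below are shipped defaults. Change them before exposing
+# JMX, and keep this file readable by its owner only (e.g. chmod 600);
+# the JVM refuses to start the JMX agent if the password file is readable
+# by others.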
+# + +# see https://docs.oracle.com/javase/8/docs/technotes/guides/management/agent.html#gdeup +iotdb passw!d +root passw!d \ No newline at end of file diff --git a/docker/data/iotdb/conf/logback-backup.xml b/docker/data/iotdb/conf/logback-backup.xml new file mode 100644 index 00000000..78fa2257 --- /dev/null +++ b/docker/data/iotdb/conf/logback-backup.xml @@ -0,0 +1,49 @@ + + + + + System.out + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + ERROR + + + + UTF-8 + ${IOTDB_HOME}/logs/log_backup.log + + ${IOTDB_HOME}/logs/log-backup-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + + + + diff --git a/docker/data/iotdb/conf/logback-confignode.xml b/docker/data/iotdb/conf/logback-confignode.xml new file mode 100644 index 00000000..0322b49e --- /dev/null +++ b/docker/data/iotdb/conf/logback-confignode.xml @@ -0,0 +1,112 @@ + + + + + + + + ${CONFIGNODE_HOME}/logs/log_confignode_error.log + + ${CONFIGNODE_HOME}/logs/log-confignode-error-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + error + ACCEPT + DENY + + + + ${CONFIGNODE_HOME}/logs/log_confignode_warn.log + + ${CONFIGNODE_HOME}/logs/log-confignode-warn-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + WARN + ACCEPT + DENY + + + + ${CONFIGNODE_HOME}/logs/log_confignode_debug.log + + ${CONFIGNODE_HOME}/logs/log-confignode-debug-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + DEBUG + ACCEPT + DENY + + + + System.out + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + ${CONSOLE_LOG_LEVEL:-DEBUG} + + + + + ${CONFIGNODE_HOME}/logs/log_confignode_all.log + + ${CONFIGNODE_HOME}/logs/log-confignode-all-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + + + + + + + + + diff --git a/docker/data/iotdb/conf/logback-datanode.xml b/docker/data/iotdb/conf/logback-datanode.xml new file mode 100644 index 00000000..698bdfc3 --- /dev/null +++ b/docker/data/iotdb/conf/logback-datanode.xml @@ -0,0 +1,267 @@ + + + + + + + + ${IOTDB_HOME}/logs/log_datanode_error.log + + ${IOTDB_HOME}/logs/log-datanode-error-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + error + ACCEPT + DENY + + + + ${IOTDB_HOME}/logs/log_datanode_warn.log + + ${IOTDB_HOME}/logs/log-datanode-warn-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + WARN + ACCEPT + DENY + + + + ${IOTDB_HOME}/logs/log_datanode_debug.log + + ${IOTDB_HOME}/logs/log-datanode-debug-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + DEBUG + ACCEPT + DENY + + + + ${IOTDB_HOME}/logs/log_datanode_trace.log + + ${IOTDB_HOME}/logs/log-datanode-trace-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + TRACE + ACCEPT + DENY + + + + System.out + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + ${CONSOLE_LOG_LEVEL:-DEBUG} + + + + + ${IOTDB_HOME}/logs/log_datanode_all.log + + ${IOTDB_HOME}/logs/log-datanode-all-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + ${IOTDB_HOME}/logs/log_datanode_measure.log + + ${IOTDB_HOME}/logs/log-datanode-measure-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + ${IOTDB_HOME}/logs/log_datanode_audit.log + + ${IOTDB_HOME}/logs/log-datanode-audit-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + ${IOTDB_HOME}/logs/log_datanode_query_debug.log + + 
${IOTDB_HOME}/logs/log-datanode-query-debug-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + ${IOTDB_HOME}/logs/log_datanode_slow_sql.log + + ${IOTDB_HOME}/logs/log-datanode-slow-sql-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + ${IOTDB_HOME}/logs/log_datanode_compaction.log + + ${IOTDB_HOME}/logs/log-datanode-compaction-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + ${IOTDB_HOME}/logs/log_datanode_pipe.log + + ${IOTDB_HOME}/logs/log-datanode-pipe-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + ${IOTDB_HOME}/logs/log_explain_analyze.log + + ${IOTDB_HOME}/logs/log-datanode-explain-%d{yyyyMMdd}.log.gz + 30 + + true + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + INFO + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docker/data/iotdb/conf/logback-tool.xml b/docker/data/iotdb/conf/logback-tool.xml new file mode 100644 index 00000000..7b0e9b82 --- /dev/null +++ b/docker/data/iotdb/conf/logback-tool.xml @@ -0,0 +1,36 @@ + + + + + System.out + + %d [%t] %-5p %C{25}:%L - %m %n + utf-8 + + + ERROR + + + + + + diff --git a/docker/data/iotdb/iotdb-jdbc-1.3.3-jar-with-dependencies.jar b/docker/data/iotdb/iotdb-jdbc-1.3.3-jar-with-dependencies.jar new file mode 100644 index 00000000..de9eb1c1 Binary files /dev/null and b/docker/data/iotdb/iotdb-jdbc-1.3.3-jar-with-dependencies.jar differ
diff --git a/docker/data/tdengine/conf/explorer.toml b/docker/data/tdengine/conf/explorer.toml
new file mode 100644
index 00000000..3bc6d567
--- /dev/null
+++ b/docker/data/tdengine/conf/explorer.toml
@@ -0,0 +1,67 @@
+# This is an automatically generated configuration file for Explorer in [TOML](https://toml.io/) format.
+#
+# Here is a full list of available options.
+
+# Explorer server port to listen on.
+# Default is 6060.
+#
+port = 6060
+
+# IPv4 listen address.
+# Default is 0.0.0.0
+addr = "0.0.0.0"
+
+# IPv6 listen address.
+
+# ipv6 = "::1"
+
+# Explorer server log level.
+# Default is "info"
+#
+log_level = "info"
+
+# REST API endpoint to connect to the cluster.
+# This configuration is also the target for data migration tasks.
+#
+# Default is "http://buildkitsandbox:6041" - the default endpoint for REST API.
+#
+cluster = "http://fastbee:6041"
+
+# native endpoint to connect to the cluster.
+# Default is disabled. To enable it, set it to the native API URL like "taos://buildkitsandbox:6030" and uncomment it.
+# If you enable it, you will get more performance for data migration tasks.
+#
+# cluster_native = "taos://buildkitsandbox:6030"
+
+# API endpoint for data replication/backup/data sources. No default option.
+# Set it to API URL like "http://buildkitsandbox:6050".
+#
+x_api = "http://fastbee:6050"
+
+# GRPC endpoint for "Agent"s.
+# Default is "http://buildkitsandbox:6055" - the default endpoint for taosX grpc API.
+# You should set it to public IP or FQDN name like:
+# "http://192.168.111.111:6055" or "http://node1.company.domain:6055" and
+# ensure to add the port to the exception list of the firewall if it is enabled.
+grpc = "http://fastbee:6055"
+
+# CORS configuration switch, it allows cross-origin access
+cors = true
+
+# cloud open api.
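+# Presumably only needed when attaching Explorer to TDengine Cloud; for a
+# self-hosted cluster like this one it can stay commented out.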
+# cloud_open_api = "https://pre.ali.cloud.taosdata.com/openapi" + +# Enable ssl +# If the following two files exist, enable ssl protocol +# +[ssl] + +# SSL certificate +# +# certificate = "/path/to/ca.file" # on linux/macOS +# certificate = "C:\\path\\to\\ca.file" # on windows + +# SSL certificate key +# +# certificate_key = "/path/to/key.file" # on linux/macOS +# certificate_key = "C:\\path\\to\\key.file" # on windows diff --git a/docker/data/tdengine/conf/taos.cfg b/docker/data/tdengine/conf/taos.cfg new file mode 100644 index 00000000..fda6a100 --- /dev/null +++ b/docker/data/tdengine/conf/taos.cfg @@ -0,0 +1,193 @@ +######################################################## +# # +# Configuration # +# # +######################################################## + +######### 0. Client only configurations ############# + +# The interval for CLI to send heartbeat to mnode +# shellActivityTimer 3 + + +############### 1. Cluster End point ############################ + +# The end point of the first dnode in the cluster to be connected to when this dnode or the CLI utility is started +# firstEp hostname:6030 + +# The end point of the second dnode to be connected to if the firstEp is not available +# secondEp + + +############### 2. Configuration Parameters of current dnode ##### + +# The FQDN of the host on which this dnode will be started. It can be IP address +fqdn fastbee + +# The port for external access after this dnode is started +# serverPort 6030 + +# The maximum number of connections a dnode can accept +# maxShellConns 5000 + +# The directory for writing log files, if you are using Windows platform please change to Windows path +# logDir /var/log/taos + +# All data files are stored in this directory, if you are using Windows platform please change to Windows path +# dataDir /var/lib/taos + +# temporary file's directory, if you are using Windows platform please change to Windows path +# tempDir /tmp/ + +# Switch for allowing to collect and report service usage information +# telemetryReporting 1 + +# Switch for allowing to collect and report crash information +# crashReporting 1 + +# The maximum number of vnodes supported by this dnode +# supportVnodes 0 + +# The interval of this dnode reporting status to mnode, [1..10] seconds +# statusInterval 1 + +# The minimum sliding window time, milli-second +# minSlidingTime 10 + +# The minimum time window, milli-second +# minIntervalTime 10 + +# The maximum allowed query buffer size in MB during query processing for each data node +# -1 no limit (default) +# 0 no query allowed, queries are disabled +# queryBufferSize -1 + +# The compressed rpc message, option: +# -1 (no compression) +# 0 (all message compressed), +# > 0 (rpc message body which larger than this value will be compressed) +# compressMsgSize -1 + +# query retrieved column data compression option: +# -1 (no compression) +# 0 (all retrieved column data compressed), +# > 0 (any retrieved column size greater than this value all data will be compressed.) 
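+# (Both compressMsgSize and compressColData default to -1, i.e. compression
+# disabled; enabling them trades CPU for network bandwidth.)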
+# compressColData -1 + +# system time zone +# timezone UTC-8 + +# system time zone (for windows 10) +# timezone Asia/Shanghai (CST, +0800) + +# system locale +# locale en_US.UTF-8 + +# system charset +# charset UTF-8 + +# stop writing logs when the disk size of the log folder is less than this value +# minimalLogDirGB 1.0 + +# stop writing temporary files when the disk size of the tmp folder is less than this value +# minimalTmpDirGB 1.0 + +# if free disk space is less than this value, this dnode will fail to start +# minimalDataDirGB 2.0 + +# enable/disable system monitor +# monitor 1 + +# enable/disable audit log +# audit 1 + +# enable/disable audit create table +# auditCreateTable 1 + +# The following parameter is used to limit the maximum number of lines in log files. +# max number of lines per log filters +# numOfLogLines 10000000 + +# write log in async way: 1 - async, 0 - sync +# asyncLog 1 + +# time period of keeping log files, in days +# logKeepDays 0 + + +############ 3. Debug Flag and levels ############################################# + +# The following parameters are used for debug purpose only by this dnode. +# debugFlag is a 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR +# Available debug levels are: +# 131: output warning and error +# 135: output debug, warning and error +# 143: output trace, debug, warning and error to log +# 199: output debug, warning and error to both screen and file +# 207: output trace, debug, warning and error to both screen and file + +# debug flag for all log type, take effect when non-zero value +# debugFlag 0 + +# debug flag for timer +# tmrDebugFlag 131 + +# debug flag for util +# uDebugFlag 131 + +# debug flag for rpc +# rpcDebugFlag 131 + +# debug flag for jni +# jniDebugFlag 131 + +# debug flag for query +# qDebugFlag 131 + +# debug flag for client driver +# cDebugFlag 131 + +# debug flag for dnode messages +# dDebugFlag 135 + +# debug flag for vnode +# vDebugFlag 131 + +# debug flag for meta management messages +# mDebugFlag 135 + +# debug flag for wal +# wDebugFlag 135 + +# debug flag for sync module +# sDebugFlag 135 + +# debug flag for tsdb +# tsdbDebugFlag 131 + +# debug flag for tq +# tqDebugFlag 131 + +# debug flag for fs +# fsDebugFlag 131 + +# debug flag for udf +# udfDebugFlag 131 + +# debug flag for sma +# smaDebugFlag 131 + +# debug flag for index +# idxDebugFlag 131 + +# debug flag for tdb +# tdbDebugFlag 131 + +# debug flag for meta +# metaDebugFlag 131 + +# generate core file when service crash +# enableCoreFile 1 +monitor 1 +monitorFQDN fastbee +audit 1 diff --git a/docker/data/tdengine/conf/taosadapter.toml b/docker/data/tdengine/conf/taosadapter.toml new file mode 100644 index 00000000..4067f755 --- /dev/null +++ b/docker/data/tdengine/conf/taosadapter.toml @@ -0,0 +1,110 @@ +debug = true +taosConfigDir = "" +port = 6041 +logLevel = "info" +httpCodeServerError = false +SMLAutoCreateDB = false + +[cors] +allowAllOrigins = true + +#[pool] +#maxConnect = 0 +#maxIdle = 0 +#idleTimeout = 0 + +[ssl] +enable = false +certFile = "" +keyFile = "" + +[log] +#path = "/var/log/taos" +rotationCount = 30 +rotationTime = "24h" +rotationSize = "1GB" +enableRecordHttpSql = false +sqlRotationCount = 2 +sqlRotationTime = "24h" +sqlRotationSize = "1GB" + +[monitor] +disable = true +collectDuration = "3s" +incgroup = false +pauseQueryMemoryThreshold = 70 +pauseAllMemoryThreshold = 80 +identity = "" + +[uploadKeeper] +enable = true +url = "http://127.0.0.1:6043/adapter_report" +interval = "15s" +timeout = "5s" +retryTimes = 3 
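+# (uploadKeeper reports taosAdapter's own metrics to taosKeeper; the url above
+# matches the taosKeeper listen port 6043 set in taoskeeper.toml alongside
+# this file)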
+retryInterval = "5s" + +[opentsdb] +enable = true + +[influxdb] +enable = true + +[statsd] +enable = false +port = 6044 +db = "statsd" +user = "root" +password = "taosdata" +worker = 10 +gatherInterval = "5s" +protocol = "udp4" +maxTCPConnections = 250 +tcpKeepAlive = false +allowPendingMessages = 50000 +deleteCounters = true +deleteGauges = true +deleteSets = true +deleteTimings = true + +[collectd] +enable = false +port = 6045 +db = "collectd" +user = "root" +password = "taosdata" +worker = 10 + + +[opentsdb_telnet] +enable = false +maxTCPConnections = 250 +tcpKeepAlive = false +dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"] +ports = [6046, 6047, 6048, 6049] +user = "root" +password = "taosdata" +batchSize = 1 +flushInterval = "0s" + +[node_exporter] +enable = false +db = "node_exporter" +user = "root" +password = "taosdata" +urls = ["http://fastbee:9100"] +responseTimeout = "5s" +httpUsername = "" +httpPassword = "" +httpBearerTokenString = "" +caCertFile = "" +certFile = "" +keyFile = "" +insecureSkipVerify = true +gatherDuration = "5s" + +[prometheus] +enable = true + +[tmq] +releaseIntervalMultiplierForAutocommit = 2 diff --git a/docker/data/tdengine/conf/taoskeeper.toml b/docker/data/tdengine/conf/taoskeeper.toml new file mode 100644 index 00000000..038d48de --- /dev/null +++ b/docker/data/tdengine/conf/taoskeeper.toml @@ -0,0 +1,48 @@ +# Start with debug middleware for gin +debug = false + +# Listen port, default is 6043 +port = 6043 + +# log level +loglevel = "info" + +# go pool size +gopoolsize = 50000 + +# interval for metrics +RotationInterval = "15s" + +[tdengine] +host = "fastbee" +port = 6041 +username = "root" +password = "taosdata" +usessl = false + +[metrics] +# metrics prefix in metrics names. +prefix = "taos" + +# export some tables that are not super table +tables = [] + +# database for storing metrics data +[metrics.database] +name = "log" +# database options for db storing metrics data +[metrics.database.options] +vgroups = 1 +buffer = 64 +KEEP = 90 +cachemodel = "both" + +[environment] +# Whether running in cgroup. 
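+# Presumably, when set to true, resource limits are read from the cgroup
+# rather than the host, which matters when taosKeeper runs inside a container
+# (the same switch exists in taosadapter.toml's [monitor] section).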
+incgroup = false + +[log] +#path = "/var/log/taos" +rotationCount = 5 +rotationTime = "24h" +rotationSize = 100000000 diff --git a/docker/data/tdengine/taos-jdbcdriver-3.3.2-dist.jar b/docker/data/tdengine/taos-jdbcdriver-3.3.2-dist.jar new file mode 100644 index 00000000..85290472 Binary files /dev/null and b/docker/data/tdengine/taos-jdbcdriver-3.3.2-dist.jar differ diff --git a/docker/data/tdengine/taos-jdbcdriver-3.5.3-dist.jar b/docker/data/tdengine/taos-jdbcdriver-3.5.3-dist.jar new file mode 100644 index 00000000..9dae8fdd Binary files /dev/null and b/docker/data/tdengine/taos-jdbcdriver-3.5.3-dist.jar differ diff --git a/docker/data/tdengine/tdengine文件.txt b/docker/data/tdengine/tdengine文件.txt new file mode 100644 index 00000000..e69de29b diff --git a/springboot/fastbee-admin/src/main/resources/application-dev.yml b/springboot/fastbee-admin/src/main/resources/application-dev.yml index 1f64bf16..6f5f4dac 100644 --- a/springboot/fastbee-admin/src/main/resources/application-dev.yml +++ b/springboot/fastbee-admin/src/main/resources/application-dev.yml @@ -30,6 +30,14 @@ spring: merge-sql: true wall: none-base-statement-allow: true + taos: # 配置 taos 数据源 + enabled: false + type: com.alibaba.druid.pool.DruidDataSource + driver-class-name: com.taosdata.jdbc.rs.RestfulDriver + url: jdbc:TAOS-RS://fastbee:6041/fastbee_log?timezone=UTC-8&charset=utf-8 + username: root + password: taosdata + dbName: fastbee_log # slave: # type: com.alibaba.druid.pool.DruidDataSource # driver-class-name: com.mysql.cj.jdbc.Driver @@ -43,7 +51,7 @@ spring: redis: host: localhost # 地址 port: 6379 # 端口,默认为6379 - database: 15 # 数据库索引 + database: 0 # 数据库索引 password: fastbee # 密码 timeout: 10s # 连接超时时间 lettuce: diff --git a/springboot/fastbee-admin/src/main/resources/application-prod.yml b/springboot/fastbee-admin/src/main/resources/application-prod.yml index 4223593e..86e1cac9 100644 --- a/springboot/fastbee-admin/src/main/resources/application-prod.yml +++ b/springboot/fastbee-admin/src/main/resources/application-prod.yml @@ -30,6 +30,14 @@ spring: merge-sql: true wall: none-base-statement-allow: true + taos: # 配置 taos 数据源 + enabled: false + type: com.alibaba.druid.pool.DruidDataSource + driver-class-name: com.taosdata.jdbc.rs.RestfulDriver + url: jdbc:TAOS-RS://fastbee:6041/fastbee_log?timezone=UTC-8&charset=utf-8 + username: root + password: taosdata + dbName: fastbee_log # slave: # type: com.alibaba.druid.pool.DruidDataSource # driver-class-name: com.mysql.cj.jdbc.Driver diff --git a/springboot/fastbee-admin/src/main/resources/application-sql.yml b/springboot/fastbee-admin/src/main/resources/application-sql.yml index 114578bb..f80b6526 100644 --- a/springboot/fastbee-admin/src/main/resources/application-sql.yml +++ b/springboot/fastbee-admin/src/main/resources/application-sql.yml @@ -30,6 +30,34 @@ spring: merge-sql: true wall: none-base-statement-allow: true + taos: # 配置 taos 数据源 + enabled: false + type: com.alibaba.druid.pool.DruidDataSource + driver-class-name: com.taosdata.jdbc.rs.RestfulDriver + url: jdbc:TAOS-RS://fastbee:6041/fastbee_log?timezone=UTC-8&charset=utf-8 + # driver-class-name: com.taosdata.jdbc.TSDBDriver + # url: jdbc:TAOS://fastbee:6030/fastbee_log?timezone=UTC-8&charset=utf-8 + # driver-class-name: com.taosdata.jdbc.ws.WebSocketDriver + # url: jdbc:TAOS-WS://fastbee:6041/fastbee_log?timezone=UTC-8&charset=utf-8 + username: root + password: taosdata + dbName: fastbee_log +# influx: # 配置 influx 数据源 +# enabled: false +# url: http://81.71.97.58:8086 +# token: 
inX0k-IPfSgKg6AIfoZm6Mv0DQyQOKCkfvs5ZF3a836Yzx2Ew9QgxsHev40_2gztuMn6tofwyS6nfbT4cD-SeA== +# bucket: device_log +# org: fastbee +# measurement: device_log +# iotdb: +# enabled: false +# driver-class-name: org.apache.iotdb.jdbc.IoTDBDriver +# url: jdbc:iotdb://81.71.97.58:6667/ +# username: root +# password: root +# dbName: root.ln +# druid: +# validation-query: '' # sqlServer: # 配置 SQLServer 数据源 # type: com.alibaba.druid.pool.DruidDataSource # driver-class-name: com.microsoft.sqlserver.jdbc.SQLServerDriver diff --git a/springboot/fastbee-open-api/src/main/java/com/fastbee/data/controller/DeviceLogController.java b/springboot/fastbee-open-api/src/main/java/com/fastbee/data/controller/DeviceLogController.java index 90851090..cb35b3bd 100644 --- a/springboot/fastbee-open-api/src/main/java/com/fastbee/data/controller/DeviceLogController.java +++ b/springboot/fastbee-open-api/src/main/java/com/fastbee/data/controller/DeviceLogController.java @@ -59,4 +59,16 @@ public class DeviceLogController extends BaseController return getDataTable(list); } + /** + * 新增设备日志 + */ + @ApiOperation("新增设备日志") + @PreAuthorize("@ss.hasPermi('iot:device:add')") + @Log(title = "设备日志", businessType = BusinessType.INSERT) + @PostMapping + public AjaxResult add(@RequestBody DeviceLog deviceLog) + { + return toAjax(deviceLogService.insertDeviceLog(deviceLog)); + } + } diff --git a/springboot/fastbee-server/mqtt-broker/src/main/java/com/fastbee/mqtt/service/impl/DataHandlerImpl.java b/springboot/fastbee-server/mqtt-broker/src/main/java/com/fastbee/mqtt/service/impl/DataHandlerImpl.java index 51282bd7..3492c528 100644 --- a/springboot/fastbee-server/mqtt-broker/src/main/java/com/fastbee/mqtt/service/impl/DataHandlerImpl.java +++ b/springboot/fastbee-server/mqtt-broker/src/main/java/com/fastbee/mqtt/service/impl/DataHandlerImpl.java @@ -6,11 +6,13 @@ import com.fastbee.common.exception.ServiceException; import com.fastbee.common.utils.DateUtils; import com.fastbee.common.utils.gateway.mq.TopicsUtils; import com.fastbee.iot.domain.Device; +import com.fastbee.iot.domain.DeviceLog; import com.fastbee.iot.domain.EventLog; import com.fastbee.common.core.thingsModel.ThingsModelSimpleItem; import com.fastbee.common.core.thingsModel.ThingsModelValuesInput; import com.fastbee.iot.service.IDeviceService; import com.fastbee.iot.service.IEventLogService; +import com.fastbee.iot.tsdb.service.ILogService; import com.fastbee.mq.model.ReportDataBo; import com.fastbee.mq.service.IDataHandler; import com.fastbee.mq.service.IMqttMessagePublish; @@ -44,6 +46,8 @@ public class DataHandlerImpl implements IDataHandler { private MqttRemoteManager remoteManager; @Resource private TopicsUtils topicsUtils; + @Resource + private ILogService logService; /** * 上报属性或功能处理 @@ -86,30 +90,32 @@ public class DataHandlerImpl implements IDataHandler { try { List thingsModelSimpleItems = JSON.parseArray(bo.getMessage(), ThingsModelSimpleItem.class); Device device = deviceService.selectDeviceBySerialNumber(bo.getSerialNumber()); - List results = new ArrayList<>(); + List results = new ArrayList<>(); for (int i = 0; i < thingsModelSimpleItems.size(); i++) { - // 添加到设备日志 - EventLog event = new EventLog(); - event.setDeviceId(device.getDeviceId()); - event.setDeviceName(device.getDeviceName()); - event.setLogValue(thingsModelSimpleItems.get(i).getValue()); - event.setRemark(thingsModelSimpleItems.get(i).getRemark()); - event.setSerialNumber(device.getSerialNumber()); - event.setIdentity(thingsModelSimpleItems.get(i).getId()); - event.setLogType(3); - 
event.setIsMonitor(0); - event.setUserId(device.getUserId()); - event.setUserName(device.getUserName()); - event.setTenantId(device.getTenantId()); - event.setTenantName(device.getTenantName()); - event.setCreateTime(DateUtils.getNowDate()); + DeviceLog deviceLog = new DeviceLog(); + deviceLog.setDeviceId(device.getDeviceId()); + deviceLog.setDeviceName(device.getDeviceName()); + deviceLog.setLogValue(thingsModelSimpleItems.get(i).getValue()); + deviceLog.setRemark(thingsModelSimpleItems.get(i).getRemark()); + deviceLog.setSerialNumber(device.getSerialNumber()); + deviceLog.setIdentify(thingsModelSimpleItems.get(i).getId()); + deviceLog.setLogType(3); + deviceLog.setIsMonitor(0); + deviceLog.setUserId(device.getTenantId()); + deviceLog.setUserName(device.getTenantName()); + deviceLog.setTenantId(device.getTenantId()); + deviceLog.setTenantName(device.getTenantName()); + deviceLog.setCreateBy(device.getCreateBy()); + deviceLog.setCreateTime(DateUtils.getNowDate()); // 1=影子模式,2=在线模式,3=其他 - event.setMode(2); - results.add(event); - //eventLogService.insertEventLog(event); + deviceLog.setMode(2); + results.add(deviceLog); } - eventLogService.insertBatch(results); - } catch (Exception e) { + for (DeviceLog deviceLog : results) { + logService.saveDeviceLog(deviceLog); + } + + }catch (Exception e) { log.error("接收事件,解析数据时异常 message={}", e.getMessage()); } } diff --git a/springboot/fastbee-service/fastbee-iot-service/pom.xml b/springboot/fastbee-service/fastbee-iot-service/pom.xml index fa358edf..d3e919d4 100644 --- a/springboot/fastbee-service/fastbee-iot-service/pom.xml +++ b/springboot/fastbee-service/fastbee-iot-service/pom.xml @@ -143,6 +143,28 @@ 19.3.0.0 + + + com.taosdata.jdbc + taos-jdbcdriver + ${tdengine.version} + + + + + + com.influxdb + influxdb-client-java + 6.7.0 + + + + + org.apache.iotdb + iotdb-jdbc + ${iotdb.version} + + diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/domain/DeviceLog.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/domain/DeviceLog.java index c7e273d1..6ef7875e 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/domain/DeviceLog.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/domain/DeviceLog.java @@ -66,7 +66,7 @@ public class DeviceLog extends BaseEntity /** 标识符 */ @ApiModelProperty("标识符") @Excel(name = "标识符") - private String identity; + private String identify; /** 是否监测数据(1=是,0=否) */ @ApiModelProperty("是否监测数据(1=是,0=否)") @@ -320,14 +320,14 @@ public class DeviceLog extends BaseEntity { return deviceName; } - public void setIdentity(String identity) + public void setIdentify(String identify) { - this.identity = identity; + this.identify = identify; } - public String getIdentity() + public String getIdentify() { - return identity; + return identify; } public void setIsMonitor(Integer isMonitor) { @@ -347,7 +347,7 @@ public class DeviceLog extends BaseEntity .append("logValue", getLogValue()) .append("deviceId", getDeviceId()) .append("deviceName", getDeviceName()) - .append("identity", getIdentity()) + .append("identify", getIdentify()) .append("createBy", getCreateBy()) .append("isMonitor", getIsMonitor()) .append("createTime", getCreateTime()) diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/domain/EventLog.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/domain/EventLog.java index abf92613..6869ea0a 100644 --- 
a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/domain/EventLog.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/domain/EventLog.java @@ -23,7 +23,7 @@ public class EventLog extends BaseEntity { /** 标识符 */ @ApiModelProperty("标识符") @Excel(name = "标识符") - private String identity; + private String identify; /** 物模型名称 */ @ApiModelProperty("物模型名称") @@ -94,14 +94,14 @@ public class EventLog extends BaseEntity { { return logId; } - public void setIdentity(String identity) + public void setIdentify(String identify) { - this.identity = identity; + this.identify = identify; } - public String getIdentity() + public String getIdentify() { - return identity; + return identify; } public void setModelName(String modelName) { @@ -216,7 +216,7 @@ public class EventLog extends BaseEntity { public String toString() { return new ToStringBuilder(this, ToStringStyle.MULTI_LINE_STYLE) .append("logId", getLogId()) - .append("identity", getIdentity()) + .append("identify", getIdentify()) .append("modelName", getModelName()) .append("logType", getLogType()) .append("logValue", getLogValue()) diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/DeviceLogMapper.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/DeviceLogMapper.java index a5f71e06..b5ea300d 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/DeviceLogMapper.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/DeviceLogMapper.java @@ -3,13 +3,10 @@ package com.fastbee.iot.mapper; import com.fastbee.iot.domain.Device; import com.fastbee.iot.domain.DeviceLog; import com.fastbee.iot.model.DeviceStatistic; -import com.fastbee.iot.model.HistoryModel; import com.fastbee.iot.model.MonitorModel; -import com.fastbee.iot.tdengine.service.model.TdLogDto; import org.apache.ibatis.annotations.Param; import org.springframework.stereotype.Repository; -import java.util.Date; import java.util.List; /** @@ -44,6 +41,13 @@ public interface DeviceLogMapper */ public List selectMonitorList(DeviceLog deviceLog); + /** + * 新增设备日志 + * + * @param deviceLog 设备日志 + * @return 结果 + */ + public int insertDeviceLog(DeviceLog deviceLog); /** * 批量保存图片 diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/IotDbLogMapper.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/IotDbLogMapper.java new file mode 100644 index 00000000..61e77b80 --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/IotDbLogMapper.java @@ -0,0 +1,38 @@ +package com.fastbee.iot.mapper; + +import com.fastbee.iot.domain.Device; +import com.fastbee.iot.domain.DeviceLog; +import com.fastbee.iot.model.MonitorModel; +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +import java.util.List; + +@Repository +public interface IotDbLogMapper { + + void createDB(String database); + Long countDB(String database); + + int save(DeviceLog deviceLog); + + int deleteDeviceLogByDeviceNumber(@Param("serialNumber") String deviceNumber); + + Long selectPropertyLogCount(@Param("device") Device device); + + Long selectEventLogCount(@Param("device") Device device); + + Long selectMonitorLogCount(@Param("device") Device device); + + /*** + * 监测数据列表 + */ + List selectMonitorList(@Param("device") DeviceLog deviceLog); + + /*** + 
* 日志列表 + */ + List selectDeviceLogList(@Param("device") DeviceLog deviceLog); + List selectEventLogList(@Param("device") DeviceLog deviceLog); + +} diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/TDDeviceLogMapper.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/TDDeviceLogMapper.java new file mode 100644 index 00000000..4beb3629 --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/mapper/TDDeviceLogMapper.java @@ -0,0 +1,84 @@ +package com.fastbee.iot.mapper; + +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.fastbee.iot.domain.Device; +import com.fastbee.iot.domain.DeviceLog; +import com.fastbee.iot.model.HistoryModel; +import com.fastbee.iot.model.MonitorModel; +import com.fastbee.iot.tsdb.model.TdLogDto; +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +import java.util.List; + +/** + * @package com.fastbee.mysql.mysql.tdengine + * 类名: DatabaseMapper + * 时间: 2022/5/16,0016 1:27 + * 开发人: wxy + */ +@Repository +public interface TDDeviceLogMapper { + + /*** + * 创建数据库 + */ + int createDB(String database); + + /*** + * 创建超级表 + */ + int createSTable(String database); + + + /*** + * 新增设备日志 + */ + int save(@Param("database") String database, @Param("device") DeviceLog deviceLog); + + /** + * 批量插入数据 + * + * @param database 数据库名 + * @param data list集合 + */ + int saveBatch(@Param("database") String database, @Param("data") TdLogDto data); + + /*** + * 设备属性数据总数 + */ + Long selectPropertyLogCount(@Param("database") String database, @Param("device") Device device); + + /*** + * 设备功能数据总数 + */ + Long selectFunctionLogCount(@Param("database") String database, @Param("device") Device device); + + /*** + * 设备事件数据总数 + */ + Long selectEventLogCount(@Param("database") String database, @Param("device") Device device); + + /*** + * 设备监测数据总数 + */ + Long selectMonitorLogCount(@Param("database") String database, @Param("device") Device device); + + /*** + * 监测数据列表 + */ + List selectMonitorList(@Param("database") String database, @Param("device") DeviceLog deviceLog); + + /*** + * 日志列表 + */ + List selectDeviceLogList(@Param("database") String database, @Param("device") DeviceLog deviceLog); + Page selectEventLogList(Page page, @Param("database") String database, @Param("device") DeviceLog deviceLog); + + /*** + * 根据设备ID删除设备日志 + */ + int deleteDeviceLogByDeviceNumber(@Param("database") String dbName, @Param("serialNumber") String serialNumber); + + +} diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/model/HistoryModel.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/model/HistoryModel.java index cad93d6a..7badacd0 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/model/HistoryModel.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/model/HistoryModel.java @@ -16,5 +16,5 @@ public class HistoryModel { private String value; - private String identity; + private String identify; } diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/model/ThingsModels/PropertyDto.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/model/ThingsModels/PropertyDto.java index 9c5b13e6..32870300 100644 --- 
a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/model/ThingsModels/PropertyDto.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/model/ThingsModels/PropertyDto.java @@ -18,6 +18,10 @@ public class PropertyDto private String id; /** 物模型名称 */ private String name; + /** + * 物模型值 + */ + private String value; /** 是否图表展示(0-否,1-是) */ private Integer isChart; /** 是否历史存储(0-否,1-是) */ diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/IDeviceLogService.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/IDeviceLogService.java index 6ced4190..c7629cd0 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/IDeviceLogService.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/IDeviceLogService.java @@ -32,4 +32,12 @@ public interface IDeviceLogService */ public List selectDeviceLogList(DeviceLog deviceLog); + /** + * 新增设备日志 + * + * @param deviceLog 设备日志 + * @return 结果 + */ + public int insertDeviceLog(DeviceLog deviceLog); + } diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceLogServiceImpl.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceLogServiceImpl.java index 2e5c155b..05cccbc9 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceLogServiceImpl.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceLogServiceImpl.java @@ -1,17 +1,16 @@ package com.fastbee.iot.service.impl; +import com.fastbee.common.core.domain.model.LoginUser; import com.fastbee.common.utils.DateUtils; +import com.fastbee.common.utils.SecurityUtils; import com.fastbee.iot.domain.DeviceLog; -import com.fastbee.iot.model.HistoryModel; -import com.fastbee.iot.tdengine.service.ILogService; -import com.fastbee.iot.mapper.DeviceLogMapper; +import com.fastbee.iot.tsdb.service.ILogService; import com.fastbee.iot.model.MonitorModel; import com.fastbee.iot.service.IDeviceLogService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import java.util.List; -import java.util.Map; /** * 设备日志Service业务层处理 @@ -52,4 +51,20 @@ public class DeviceLogServiceImpl implements IDeviceLogService } return logService.selectDeviceLogList(deviceLog); } + + /** + * 新增设备日志 + * + * @param deviceLog 设备日志 + * @return 结果 + */ + @Override + public int insertDeviceLog(DeviceLog deviceLog) { + deviceLog.setCreateTime(DateUtils.getNowDate()); + LoginUser loginUser = SecurityUtils.getLoginUser(); + deviceLog.setTenantId(loginUser.getDeptUserId()); + deviceLog.setUserId(loginUser.getUserId()); + deviceLog.setCreateBy(loginUser.getUsername()); + return logService.saveDeviceLog(deviceLog); + } } diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceRuntimeServiceImpl.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceRuntimeServiceImpl.java index 4e399022..a642944f 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceRuntimeServiceImpl.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceRuntimeServiceImpl.java @@ -67,7 +67,7 @@ public class 
DeviceRuntimeServiceImpl implements IDeviceRuntimeService { log.setModelName(specs.getName()); log.setLogType(type.getCode()); log.setSpecs(JSONObject.toJSONString(specs.getDatatype())); - log.setIdentity(specs.getId()); + log.setIdentify(specs.getId()); log.setSerialNumber(serialNumber); log.setSlaveId(specs.getSlaveId()); log.setIsMonitor(specs.getIsMonitor()); diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceServiceImpl.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceServiceImpl.java index f62adeb5..b62f4e3a 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceServiceImpl.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/service/impl/DeviceServiceImpl.java @@ -14,6 +14,7 @@ import com.fastbee.common.core.thingsModel.ThingsModelSimpleItem; import com.fastbee.common.core.thingsModel.ThingsModelValuesInput; import com.fastbee.common.enums.DataEnum; import com.fastbee.common.enums.DeviceStatus; +import com.fastbee.common.enums.ThingsModelType; import com.fastbee.common.exception.ServiceException; import com.fastbee.common.utils.DateUtils; import com.fastbee.common.utils.StringUtils; @@ -34,7 +35,7 @@ import com.fastbee.iot.model.ThingsModels.ThingsModelValueItem; import com.fastbee.iot.model.ThingsModels.ValueItem; import com.fastbee.iot.service.*; import com.fastbee.iot.service.cache.IDeviceCache; -import com.fastbee.iot.tdengine.service.ILogService; +import com.fastbee.iot.tsdb.service.ILogService; import com.fastbee.system.service.ISysUserService; import org.quartz.SchedulerException; import org.slf4j.Logger; @@ -220,6 +221,10 @@ public class DeviceServiceImpl implements IDeviceService { String key = RedisKeyBuilder.buildTSLVCacheKey(input.getProductId(), input.getDeviceNumber()); Map maps = new HashMap(); List list = new ArrayList<>(); + //属性存储集合 + List deviceLogList = new ArrayList<>(); + //指令存储集合 + List functionLogList = new ArrayList<>(); for (ThingsModelSimpleItem item : input.getThingsModelValueRemarkItem()) { String identity = item.getId(); Integer slaveId = input.getSlaveId() == null ? item.getSlaveId() : input.getSlaveId(); @@ -281,12 +286,82 @@ public class DeviceServiceImpl implements IDeviceService { /* ★★★★★★★★★★★★★★★★★★★★★★ 处理数据 - 结束 ★★★★★★★★★★★★★★★★★★★★★★*/ /*★★★★★★★★★★★★★★★★★★★★★★ 存储数据 - 开始 ★★★★★★★★★★★★★★★★★★★★★★*/ - if (null != dto.getIsHistory()) { - - } + ThingsModelType modelType = ThingsModelType.getType(dto.getType()); + Device device = this.selectDeviceBySerialNumber(serialNumber); + switch (modelType) { + case PROP: + if (1 == dto.getIsHistory()) { + DeviceLog deviceLog = new DeviceLog(); + deviceLog.setSerialNumber(serialNumber); + deviceLog.setLogType(type); + // 1=影子模式,2=在线模式,3=其他 + deviceLog.setMode(isShadow ? 
1 : 2); + // 设备日志值 + deviceLog.setLogValue(value); + deviceLog.setRemark(item.getRemark()); + deviceLog.setIdentify(id); + deviceLog.setCreateTime(DateUtils.getNowDate()); + deviceLog.setCreateBy(device.getCreateBy()); + deviceLog.setUserId(device.getTenantId()); + deviceLog.setUserName(device.getTenantName()); + deviceLog.setTenantId(device.getTenantId()); + deviceLog.setTenantName(device.getTenantName()); + deviceLog.setModelName(dto.getName()); + deviceLog.setIsMonitor(dto.getIsMonitor()); + deviceLogList.add(deviceLog); + } + break; + case SERVICE: + if (1 == dto.getIsHistory()) { + FunctionLog function = new FunctionLog(); + function.setCreateTime(DateUtils.getNowDate()); + function.setFunValue(value); + function.setSerialNumber(input.getDeviceNumber()); + function.setIdentify(id); + function.setShowValue(value); + // 属性获取 + function.setFunType(2); + function.setUserId(device.getTenantId()); + function.setCreateBy(device.getCreateBy()); + function.setModelName(dto.getName()); + functionLogList.add(function); + } + break; + case EVENT: + DeviceLog event = new DeviceLog(); + event.setDeviceId(device.getDeviceId()); + event.setDeviceName(device.getDeviceName()); + event.setLogValue(value); + event.setSerialNumber(serialNumber); + event.setIdentify(id); + event.setLogType(3); + event.setIsMonitor(0); + event.setUserId(device.getTenantId()); + event.setUserName(device.getTenantName()); + event.setTenantId(device.getTenantId()); + event.setTenantName(device.getTenantName()); + event.setCreateTime(DateUtils.getNowDate()); + event.setCreateBy(device.getCreateBy()); + // 1=影子模式,2=在线模式,3=其他 + event.setMode(2); + event.setModelName(dto.getName()); + deviceLogList.add(event); + break; + } list.add(item); } redisCache.hashPutAll(key, maps); + if (!CollectionUtils.isEmpty(functionLogList) && !isShadow) { + functionLogService.insertBatch(functionLogList); + } + if (!CollectionUtils.isEmpty(deviceLogList) && !isShadow) { + long baseTs = System.currentTimeMillis(); + for (int i = 0; i < deviceLogList.size(); i++) { + // 每条间隔1毫秒,避免TDengine时间冲突 + deviceLogList.get(i).setTs(new Date(baseTs + i)); + logService.saveDeviceLog(deviceLogList.get(i)); + } + } /* ★★★★★★★★★★★★★★★★★★★★★★ 存储数据 - 结束 ★★★★★★★★★★★★★★★★★★★★★★*/ return list; } @@ -891,31 +966,38 @@ public class DeviceServiceImpl implements IDeviceService { } } int result = deviceMapper.updateDeviceStatus(device); - // 添加到设备日志 - EventLog event = new EventLog(); - event.setDeviceId(device.getDeviceId()); - event.setDeviceName(device.getDeviceName()); - event.setSerialNumber(device.getSerialNumber()); - event.setIsMonitor(0); - event.setUserId(device.getUserId()); - event.setUserName(device.getUserName()); - event.setTenantId(device.getTenantId()); - event.setTenantName(device.getTenantName()); - event.setCreateTime(DateUtils.getNowDate()); - // 日志模式 1=影子模式,2=在线模式,3=其他 - event.setMode(3); + DeviceLog deviceLog = new DeviceLog(); + deviceLog.setDeviceId(device.getDeviceId()); + deviceLog.setDeviceName(device.getDeviceName()); + deviceLog.setSerialNumber(device.getSerialNumber()); + deviceLog.setIsMonitor(0); + deviceLog.setTenantId(device.getTenantId()); + deviceLog.setUserId(device.getTenantId()); + deviceLog.setUserName(device.getTenantName()); + deviceLog.setTenantName(device.getTenantName()); + deviceLog.setCreateTime(DateUtils.getNowDate()); + deviceLog.setCreateBy(device.getCreateBy()); + deviceLog.setMode(3); if (device.getStatus() == 3) { - event.setLogValue("1"); - event.setRemark("设备上线"); - event.setIdentity("online"); - event.setLogType(5); + 
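+            // status 3 = online: record an "online" log entry (logType 5); the
+            // branches below mirror this for offline (status 4, logType 6) and the
+            // newly added disabled state (status 2, logType 8)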
deviceLog.setLogValue("1"); + deviceLog.setRemark("设备上线"); + deviceLog.setIdentify("online"); + deviceLog.setLogType(5); + log.info("设备上线,sn:{}", device.getSerialNumber()); } else if (device.getStatus() == 4) { - event.setLogValue("0"); - event.setRemark("设备离线"); - event.setIdentity("offline"); - event.setLogType(6); + deviceLog.setLogValue("0"); + deviceLog.setRemark("设备离线"); + deviceLog.setIdentify("offline"); + deviceLog.setLogType(6); + log.info("设备离线,sn:{}", device.getSerialNumber()); + } else if (device.getStatus() == 2) { + deviceLog.setLogValue("2"); + deviceLog.setRemark("设备禁用"); + deviceLog.setIdentify("disable"); + deviceLog.setLogType(8); + log.info("设备禁用,sn:{}", device.getSerialNumber()); } - eventLogMapper.insertEventLog(event); + logService.saveDeviceLog(deviceLog); return result; } diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/impl/MySqlLogServiceImpl.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/impl/MySqlLogServiceImpl.java deleted file mode 100644 index 1c63b5c0..00000000 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/impl/MySqlLogServiceImpl.java +++ /dev/null @@ -1,63 +0,0 @@ -package com.fastbee.iot.tdengine.service.impl; - -import com.fastbee.common.utils.DateUtils; -import com.fastbee.iot.domain.Device; -import com.fastbee.iot.domain.DeviceLog; -import com.fastbee.iot.model.DeviceStatistic; -import com.fastbee.iot.model.HistoryModel; -import com.fastbee.iot.tdengine.service.ILogService; -import com.fastbee.iot.mapper.DeviceLogMapper; -import com.fastbee.iot.model.MonitorModel; -import com.fastbee.iot.tdengine.service.model.TdLogDto; -import org.springframework.stereotype.Service; - -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -@Service -public class MySqlLogServiceImpl implements ILogService { - - private DeviceLogMapper deviceLogMapper; - - public MySqlLogServiceImpl(DeviceLogMapper _deviceLogMapper){ - this.deviceLogMapper=_deviceLogMapper; - } - - /*** - * 根据设备ID删除设备日志 - * @return - */ - @Override - public int deleteDeviceLogByDeviceNumber(String deviceNumber) { - return deviceLogMapper.deleteDeviceLogByDeviceNumber(deviceNumber); - } - - /*** - * 设备属性、功能、事件和监测数据总数 - * @return - */ - @Override - public DeviceStatistic selectCategoryLogCount(Device device){ - return deviceLogMapper.selectCategoryLogCount(device); - } - - - /*** - * 监测数据列表 - * @return - */ - @Override - public List selectMonitorList(DeviceLog deviceLog) { - return deviceLogMapper.selectMonitorList(deviceLog); - } - - /*** - * 日志列表 - * @return - */ - @Override - public List selectDeviceLogList(DeviceLog deviceLog) { - return deviceLogMapper.selectDeviceLogList(deviceLog); - } -} diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/config/InfluxConfig.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/config/InfluxConfig.java new file mode 100644 index 00000000..db60e8ad --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/config/InfluxConfig.java @@ -0,0 +1,75 @@ +package com.fastbee.iot.tsdb.config; + +import com.influxdb.client.*; +import lombok.Data; +import okhttp3.OkHttpClient; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.ConfigurationProperties; +import 
org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * @Author gx_ma
+ * @Date: 2025/03/04 11:19
+ * @description
+ */
+@Data
+@Configuration
+@ConfigurationProperties(prefix = "spring.datasource.dynamic.datasource.influx")
+public class InfluxConfig {
+ private boolean enabled;
+ private String url;
+ private String token;
+ private String org;
+ private String bucket;
+ private String measurement;
+
+
+ /**
+ * Create the OkHttpClient instance used for HTTP request configuration (singleton)
+ *
+ * @return OkHttpClient instance
+ */
+ @Bean
+ @ConditionalOnProperty(prefix = "spring.datasource.dynamic.datasource.influx", name = "enabled", havingValue = "true")
+ public OkHttpClient okHttpClient() {
+ return new OkHttpClient.Builder()
+ .connectTimeout(30, TimeUnit.SECONDS)
+ .readTimeout(60, TimeUnit.SECONDS)
+ .connectionPool(new okhttp3.ConnectionPool(50, 1, TimeUnit.MINUTES))
+ .build();
+ }
+
+ /**
+ * Create the InfluxDBClient instance
+ *
+ * @return InfluxDBClient instance
+ */
+ @Bean
+ @ConditionalOnProperty(prefix = "spring.datasource.dynamic.datasource.influx", name = "enabled", havingValue = "true")
+ public InfluxDBClient influxDBClient(OkHttpClient okHttpClient) {
+ return InfluxDBClientFactory.create(
+ InfluxDBClientOptions.builder()
+ .url(this.url)
+ .org(this.org)
+ .bucket(this.bucket)
+ .authenticateToken(this.token.toCharArray())
+ .okHttpClient(okHttpClient.newBuilder())
+ .build()
+ );
+ }
+
+ /**
+ * Create the blocking write API instance
+ *
+ * @param influxDBClient InfluxDBClient instance
+ * @return WriteApiBlocking instance
+ */
+ @Bean
+ @ConditionalOnProperty(prefix = "spring.datasource.dynamic.datasource.influx", name = "enabled", havingValue = "true")
+ public WriteApiBlocking writeApi(final InfluxDBClient influxDBClient) {
+ return influxDBClient.getWriteApiBlocking();
+ }
+}
diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/config/IotDbConfig.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/config/IotDbConfig.java new file mode 100644 index 00000000..6debe43d --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/config/IotDbConfig.java @@ -0,0 +1,16 @@
+package com.fastbee.iot.tsdb.config;
+
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+@Data
+@ConfigurationProperties(prefix = "spring.datasource.dynamic.datasource.iotdb")
+public class IotDbConfig {
+ private boolean enabled;
+ private String dbName;
+ private String url;
+ private String username;
+ private String password;
+}
diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/config/TDengineConfig.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/config/TDengineConfig.java new file mode 100644 index 00000000..db06e502 --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/config/TDengineConfig.java @@ -0,0 +1,22 @@
+package com.fastbee.iot.tsdb.config;
+
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.context.annotation.Configuration;
+
+/**
+ * Class: TDengineConfig
+ * Description: TDengine configuration class
+ * Date: 2022/5/13 1:14
+ * Author: wxy
+ */
+@Configuration
+@Data
+@ConfigurationProperties(prefix =
"spring.datasource.dynamic.datasource.taos") +public class TDengineConfig { + private boolean enabled; + private String dbName; + private String url; + private String username; + private String password; +} diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/init/ApplicationStarted.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/init/ApplicationStarted.java new file mode 100644 index 00000000..e1fc3f3e --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/init/ApplicationStarted.java @@ -0,0 +1,131 @@ +package com.fastbee.iot.tsdb.init; + +import com.alibaba.druid.pool.DruidDataSource; +import com.fastbee.iot.tsdb.config.InfluxConfig; +import com.fastbee.iot.tsdb.config.IotDbConfig; +import com.fastbee.iot.tsdb.config.TDengineConfig; +import com.fastbee.iot.tsdb.service.ILogService; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; + +import javax.annotation.PostConstruct; +import javax.annotation.Resource; +import java.sql.Connection; +import java.sql.PreparedStatement; + +/** + * 类名: ApplicationStarted + * 时间: 2022/5/18,0018 1:41 + * 开发人: wxy + */ +@Slf4j +@Component +public class ApplicationStarted { + @Resource + private ILogService tsdbService; + @Resource + private TDengineConfig tDengineConfig; + @Resource + private InfluxConfig influxConfig; + @Resource + private IotDbConfig iotDbConfig; + + @PostConstruct + public void run() { + //同时只能启用一个时序数据库 + // 缓存配置状态以减少重复调用 + boolean isTDengineEnabled = tDengineConfig.isEnabled(); + boolean isInfluxEnabled = influxConfig.isEnabled(); + boolean isIoTDBEnabled = iotDbConfig.isEnabled(); + + // 检查是否同时启用了多个时序数据库 + int enabledCount = (isTDengineEnabled ? 1 : 0) + (isInfluxEnabled ? 1 : 0) + (isIoTDBEnabled ? 1 : 0); + if (enabledCount > 1) { + log.error("只能启用一个时序数据库,当前启用的数据库包括:" + + (isTDengineEnabled ? "TDengine, " : "") + + (isInfluxEnabled ? "Influx, " : "") + + (isIoTDBEnabled ? 
"IoTDB" : "")); + return; + } + // 根据配置选择时序数据库 + if (isTDengineEnabled) { + try { + initTDengine(tDengineConfig.getDbName()); + log.info("使用TDengine存储设备数据,初始化成功,数据库名称: {}", tDengineConfig.getDbName()); + } catch (Exception e) { + log.error("TDengine初始化失败,数据库名称: {}, 错误信息: {}", tDengineConfig.getDbName(), e.getMessage(), e); + } + } else if (isInfluxEnabled) { + log.info("使用Influx存储设备数据,初始化成功"); + } else if (isIoTDBEnabled) { + initIoTDB(iotDbConfig.getDbName()); + log.info("使用IoTDB存储设备数据,初始化成功"); + } else { + log.info("未启用任何时序数据库,使用Mysql存储设备数据,初始化成功"); + } + } + + public void initIoTDB(String dbName) { + tsdbService.createSTable(dbName); + log.info("完成IoTDB超级表的创建"); + } + + /** + * @return + * @Method + * @Description 开始初始化加载系统参数, 创建数据库和超级表 + * @Param null + * @date 2022/5/22,0022 14:27 + * @author wxy + */ + public void initTDengine(String dbName) { + try { + createDatabase(); + //创建数据库表 + tsdbService.createSTable(dbName); + log.info("完成超级表的创建"); + } catch (Exception e) { + log.error("错误", e.getMessage()); + e.printStackTrace(); + } + + } + + /** + * @return + * @Method + * @Description 根据数据库连接自动创建数据库 + * @Param null + * @date 2022/5/24,0024 14:32 + * @author wxy + */ + private void createDatabase() { + try { + //去掉数据库名 + String jdbcUrl = tDengineConfig.getUrl(); + int startIndex = jdbcUrl.indexOf('/', 15); + int endIndex = jdbcUrl.indexOf('?'); + String newJdbcUrl = jdbcUrl.substring(0, startIndex); + newJdbcUrl = newJdbcUrl + jdbcUrl.substring(endIndex); + + DruidDataSource dataSource = new DruidDataSource(); + dataSource.setUrl(newJdbcUrl); + dataSource.setUsername(tDengineConfig.getUsername()); + dataSource.setPassword(tDengineConfig.getPassword()); + if (tDengineConfig.getUrl().contains("jdbc:TAOS://")) { + dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + } else if (tDengineConfig.getUrl().contains("jdbc:TAOS-WS://")) { + dataSource.setDriverClassName("com.taosdata.jdbc.ws.WebSocketDriver"); + } else if(tDengineConfig.getUrl().contains("jdbc:TAOS-RS://")) { + dataSource.setDriverClassName("com.taosdata.jdbc.rs.RestfulDriver"); + } + Connection conn = dataSource.getConnection(); + PreparedStatement ps = conn.prepareStatement(String.format("create database if not exists %s;", tDengineConfig.getDbName())); + boolean resultS = ps.execute(); + log.info("完成数据库创建:{}",resultS); + } catch (Exception e) { + log.info("错误", e.getMessage()); + e.printStackTrace(); + } + } +} diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/model/TdLogDto.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/model/TdLogDto.java similarity index 89% rename from springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/model/TdLogDto.java rename to springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/model/TdLogDto.java index 54e1b91f..cb339935 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/model/TdLogDto.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/model/TdLogDto.java @@ -1,4 +1,4 @@ -package com.fastbee.iot.tdengine.service.model; +package com.fastbee.iot.tsdb.model; import com.fastbee.iot.domain.DeviceLog; import lombok.AllArgsConstructor; diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/ILogService.java 
diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/model/TdLogDto.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/model/TdLogDto.java similarity index 89% rename from springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/model/TdLogDto.java rename to springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/model/TdLogDto.java index 54e1b91f..cb339935 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/model/TdLogDto.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/model/TdLogDto.java @@ -1,4 +1,4 @@
-package com.fastbee.iot.tdengine.service.model;
+package com.fastbee.iot.tsdb.model;
import com.fastbee.iot.domain.DeviceLog;
import lombok.AllArgsConstructor;
diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/ILogService.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/ILogService.java similarity index 74% rename from springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/ILogService.java rename to springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/ILogService.java index b89d8ef9..e8c92b7d 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tdengine/service/ILogService.java +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/ILogService.java @@ -1,17 +1,13 @@
-package com.fastbee.iot.tdengine.service;
+package com.fastbee.iot.tsdb.service;
import com.fastbee.iot.domain.Device;
import com.fastbee.iot.domain.DeviceLog;
import com.fastbee.iot.model.DeviceStatistic;
-import com.fastbee.iot.model.HistoryModel;
import com.fastbee.iot.model.MonitorModel;
-import com.fastbee.iot.tdengine.service.model.TdLogDto;
-import org.springframework.stereotype.Service;
+import com.fastbee.iot.tsdb.model.TdLogDto;
-import java.util.Date;
import java.util.List;
-import java.util.Map;
/** * @package iot.iot.log @@ -22,6 +18,16 @@ import java.util.Map; */ public interface ILogService {
+ int createSTable(String database);
+
+ /** save a device log **/
+ int saveDeviceLog(DeviceLog deviceLog);
+
+ /**
+ * batch-save device logs
+ */
+ int saveBatch(TdLogDto dto);
+
/** delete device logs by device serial number **/
int deleteDeviceLogByDeviceNumber(String deviceNumber);
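With the new `createSTable`/`saveDeviceLog`/`saveBatch` methods on `ILogService`, exactly one backend implementation is active at runtime (each is guarded by `@ConditionalOnProperty` and marked `@Primary`), so callers stay backend-agnostic. A hypothetical consumer, just to illustrate the wiring:

```java
import javax.annotation.Resource;
import org.springframework.stereotype.Service;

// Hypothetical caller: Spring injects whichever ILogService implementation
// (TDengine, InfluxDB, IoTDB or MySQL) is enabled in the configuration.
@Service
public class DeviceLogFacade {

    @Resource
    private ILogService logService;

    public void record(DeviceLog deviceLog) {
        logService.saveDeviceLog(deviceLog);
    }
}
```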
diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/InfluxLogService.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/InfluxLogService.java new file mode 100644 index 00000000..c784e9a5 --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/InfluxLogService.java @@ -0,0 +1,514 @@
+package com.fastbee.iot.tsdb.service.impl;
+
+import com.baomidou.dynamic.datasource.annotation.DS;
+import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
+import com.fastbee.iot.domain.Device;
+import com.fastbee.iot.domain.DeviceLog;
+import com.fastbee.iot.model.DeviceStatistic;
+import com.fastbee.iot.model.HistoryModel;
+import com.fastbee.iot.model.MonitorModel;
+import com.fastbee.iot.tsdb.config.InfluxConfig;
+import com.fastbee.iot.tsdb.service.ILogService;
+import com.fastbee.iot.tsdb.model.TdLogDto;
+import com.fastbee.iot.util.SnowflakeIdWorker;
+import com.influxdb.client.InfluxDBClient;
+import com.influxdb.client.QueryApi;
+import com.influxdb.client.WriteApiBlocking;
+import com.influxdb.client.domain.WritePrecision;
+import com.influxdb.client.write.Point;
+import com.influxdb.query.FluxRecord;
+import com.influxdb.query.FluxTable;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Primary;
+import org.springframework.stereotype.Service;
+import javax.annotation.Resource;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.time.Instant;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
+import java.util.*;
+import java.util.stream.Collectors;
+
+/**
+ * @Author gx_ma
+ * @Date: 2025/03/04 11:16
+ * @description
+ */
+@Slf4j
+@Primary
+@ConditionalOnProperty(name = "spring.datasource.dynamic.datasource.influx.enabled", havingValue = "true")
+@DS("influx")
+@Service("Influx")
+public class InfluxLogService implements ILogService {
+
+ @Resource
+ private InfluxConfig influxConfig;
+
+ @Resource
+ private InfluxDBClient influxDBClient;
+
+ @Resource
+ private WriteApiBlocking writeApi;
+
+ private SnowflakeIdWorker snowflakeIdWorker = new SnowflakeIdWorker(1);
+
+ @Override
+ public int createSTable(String database) {
+ // InfluxDB has no super-table concept; nothing to create
+ return 0;
+ }
+
+ @Override
+ public int saveDeviceLog(DeviceLog deviceLog) {
+ long logId = snowflakeIdWorker.nextId();
+ deviceLog.setLogId(logId);
+ Point point = Point.measurement(influxConfig.getMeasurement())
+ .addTag("serialNumber", deviceLog.getSerialNumber())
+ .addField("logId", deviceLog.getLogId())
+ .addField("logType", deviceLog.getLogType())
+ .addField("logValue", deviceLog.getLogValue())
+ .addField("deviceId", deviceLog.getDeviceId())
+ .addField("deviceName", deviceLog.getDeviceName())
+ .addField("identify", deviceLog.getIdentify())
+ .addField("createBy", deviceLog.getCreateBy())
+ .addField("isMonitor", deviceLog.getIsMonitor())
+ .addField("mode", deviceLog.getMode())
+ .addField("remark", deviceLog.getRemark())
+ .addField("userId", deviceLog.getUserId())
+ .addField("userName", deviceLog.getUserName())
+ .addField("tenantId", deviceLog.getTenantId())
+ .addField("tenantName", deviceLog.getTenantName())
+ .addField("modelName", deviceLog.getModelName())
+ .time(deviceLog.getCreateTime().toInstant(), WritePrecision.NS);
+ writeApi.writePoint(influxConfig.getBucket(), influxConfig.getOrg(), point);
+ return 1;
+ }
+
+ @Override
+ public int saveBatch(TdLogDto dto) {
+ int ret = 0;
+ for (DeviceLog deviceLog : dto.getList()) {
+ ret += this.saveDeviceLog(deviceLog);
+ }
+ return ret;
+ }
+
+ @Override
+ public int deleteDeviceLogByDeviceNumber(String deviceNumber) {
+ // delete all points of this device through the client's DeleteApi;
+ // delete predicates only support '=' comparisons joined with AND,
+ // and the API does not report how many points were removed
+ try {
+ influxDBClient.getDeleteApi().delete(
+ OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC),
+ OffsetDateTime.now(ZoneOffset.UTC),
+ String.format("_measurement=\"%s\" AND serialNumber=\"%s\"", influxConfig.getMeasurement(), deviceNumber),
+ influxConfig.getBucket(),
+ influxConfig.getOrg());
+ return 1;
+ } catch (Exception e) {
+ log.error("Failed to delete logs for device: {}", deviceNumber, e);
+ return 0;
+ }
+ }
+
+ @Override
+ public DeviceStatistic selectCategoryLogCount(Device device) {
+ DeviceStatistic statistic = new DeviceStatistic();
+ Long property = this.selectPropertyLogCount(device);
+ Long event = this.selectEventLogCount(device);
+ Long monitor = this.selectMonitorLogCount(device);
+ statistic.setPropertyCount(property == null ? 0 : property);
+ statistic.setEventCount(event == null ? 0 : event);
+ statistic.setMonitorCount(monitor == null ?
0 : monitor);
+ return statistic;
+ }
+
+ private Long selectMonitorLogCount(Device device) {
+ QueryApi queryApi = influxDBClient.getQueryApi();
+ // build the Flux query
+ StringBuilder fluxQuery = new StringBuilder();
+ fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\")\n")
+ .append(" |> range(start: 0)\n")
+ .append(" |> filter(fn: (r) => r[\"_measurement\"] == \"").append(influxConfig.getMeasurement()).append("\")\n")
+ .append(" |> pivot(rowKey: [\"_time\"], columnKey: [\"_field\"], valueColumn: \"_value\") ")
+ .append(" |> filter(fn: (r) => r[\"logType\"] == 1 and r[\"isMonitor\"] == 1)");
+ if (device.getTenantId() != null) {
+ fluxQuery.append(" |> filter(fn: (r) => r[\"tenantId\"] == ").append(device.getTenantId()).append(")");
+ }
+ if (!Objects.isNull(device.getCreateBy())) {
+ fluxQuery.append(" |> filter(fn: (r) => r[\"createBy\"] == \"").append(device.getCreateBy()).append("\")");
+ }
+ fluxQuery.append(" |> group()").append(" |> count(column: \"mode\")");
+
+ // run the query
+ log.debug("monitor-count Flux query: {}", fluxQuery);
+ List<FluxTable> tables = queryApi.query(fluxQuery.toString());
+ // read the single count record
+ if (!tables.isEmpty() && !tables.get(0).getRecords().isEmpty()) {
+ FluxRecord record = tables.get(0).getRecords().get(0);
+ return record.getValueByKey("mode") != null ? ((Long) record.getValueByKey("mode")) : 0L;
+ }
+ return 0L;
+ }
+
+ private Long selectEventLogCount(Device device) {
+ QueryApi queryApi = influxDBClient.getQueryApi();
+ StringBuilder fluxQuery = new StringBuilder();
+ fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\")\n")
+ .append(" |> range(start: 0)\n")
+ .append(" |> filter(fn: (r) => r[\"_measurement\"] == \"").append(influxConfig.getMeasurement()).append("\")\n")
+ .append(" |> pivot(rowKey: [\"_time\"], columnKey: [\"_field\"], valueColumn: \"_value\") ")
+ .append(" |> filter(fn: (r) => r[\"logType\"] == 3)");
+
+ if (device.getTenantId() != null) {
+ fluxQuery.append(" |> filter(fn: (r) => r[\"tenantId\"] == ").append(device.getTenantId()).append(")");
+ }
+ if (!Objects.isNull(device.getCreateBy())) {
+ fluxQuery.append(" |> filter(fn: (r) => r[\"createBy\"] == \"").append(device.getCreateBy()).append("\")");
+ }
+
+ fluxQuery.append("|> group()").append("|> count(column: \"mode\")\n");
+
+ List<FluxTable> tables = queryApi.query(fluxQuery.toString());
+ log.debug("event-count Flux query: {}", fluxQuery);
+ if (!tables.isEmpty() && !tables.get(0).getRecords().isEmpty()) {
+ FluxRecord record = tables.get(0).getRecords().get(0);
+ return record.getValueByKey("mode") != null ?
((Long) record.getValueByKey("mode")) : 0L;
+ }
+ return 0L;
+ }
+
+ private Long selectPropertyLogCount(Device device) {
+ QueryApi queryApi = influxDBClient.getQueryApi();
+ StringBuilder fluxQuery = new StringBuilder();
+ fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\")\n")
+ .append(" |> range(start: 0)\n")
+ .append(" |> filter(fn: (r) => r[\"_measurement\"] == \"").append(influxConfig.getMeasurement()).append("\")\n")
+ .append(" |> pivot(rowKey: [\"_time\"], columnKey: [\"_field\"], valueColumn: \"_value\") ")
+ .append(" |> filter(fn: (r) => r[\"logType\"] == 1)\n");
+
+ if (device.getTenantId() != null) {
+ fluxQuery.append(" |> filter(fn: (r) => r[\"tenantId\"] == ").append(device.getTenantId()).append(")");
+ }
+ if (!Objects.isNull(device.getCreateBy())) {
+ fluxQuery.append(" |> filter(fn: (r) => r[\"createBy\"] == \"").append(device.getCreateBy()).append("\")");
+ }
+ fluxQuery.append("|> group()").append(" |> count(column: \"mode\")\n");
+
+ List<FluxTable> tables = queryApi.query(fluxQuery.toString());
+ log.debug("property-count Flux query: {}", fluxQuery);
+ if (!tables.isEmpty() && !tables.get(0).getRecords().isEmpty()) {
+ FluxRecord record = tables.get(0).getRecords().get(0);
+ return record.getValueByKey("mode") != null ? ((Long) record.getValueByKey("mode")) : 0L;
+ }
+ return 0L;
+ }
+
+ @Override
+ public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog) {
+ QueryApi queryApi = influxDBClient.getQueryApi();
+
+ StringBuilder fluxQuery = new StringBuilder();
+ fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\") ")
+ .append("|> range(start: 0) ")
+ .append("|> filter(fn: (r) => r._measurement == \"").append(influxConfig.getMeasurement()).append("\") ");
+
+ fluxQuery.append("|> pivot(\n" + " rowKey:[\"_time\"], \n" + " columnKey: [\"_field\"], \n" + " valueColumn: \"_value\"\n" + " )");
+ fluxQuery.append("|> sort(columns: [\"_time\"], desc: true)")
+ .append("|> group()");
+
+ List<String> filterConditions = new ArrayList<>();
+ if (deviceLog.getIsMonitor() != null) {
+ filterConditions.add("r.isMonitor == " + deviceLog.getIsMonitor());
+ }
+ if (deviceLog.getSerialNumber() != null && !deviceLog.getSerialNumber().isEmpty()) {
+ filterConditions.add("r.serialNumber == \"" + deviceLog.getSerialNumber() + "\"");
+ }
+ if (deviceLog.getLogType() != null) {
+ filterConditions.add("r.logType == " + deviceLog.getLogType());
+ } else {
+ filterConditions.add("r.logType != 7");
+ }
+ if (deviceLog.getIdentify() != null && !deviceLog.getIdentify().isEmpty()) {
+ filterConditions.add("r.identify =~ /.*" + deviceLog.getIdentify() + ".*/");
+ }
+
+ fluxQuery.append("|> filter(fn: (r) => ");
+ for (int i = 0; i < filterConditions.size(); i++) {
+ if (i > 0) {
+ fluxQuery.append(" and ");
+ }
+ fluxQuery.append(filterConditions.get(i));
+ }
+ fluxQuery.append(") ");
+
+ // compute the paging offset
+ int pageNum = deviceLog.getPageNum();
+ int pageSize = deviceLog.getPageSize();
+ int offset = (pageNum - 1) * pageSize;
+ // append the pagination stage
+ StringBuilder originalQuery = new StringBuilder(fluxQuery);
+ originalQuery.append("|> limit(n: ").append(pageSize).append(", offset: ").append(offset).append(")");
+
+ List<FluxTable> tables = queryApi.query(originalQuery.toString());
+
+ List<DeviceLog> deviceLogList = new ArrayList<>();
+ for (FluxTable table : tables) {
+ for (FluxRecord record : table.getRecords()) {
+ DeviceLog log = new DeviceLog();
+ setDeviceLog(deviceLogList, record, log);
+ }
+ }
+ return deviceLogList;
+
+ // note: because limit/offset are applied, the total row count is not available here; an extra query would be needed
+// List<FluxTable>
countTables = queryApi.query(fluxQuery.toString());
+// long total = 0;
+// if (!countTables.isEmpty() && !countTables.get(0).getRecords().isEmpty()) {
+// total = countTables.get(0).getRecords().size();
+// }
+// // build the MyBatis-Plus Page object
+// Page<DeviceLog> page = new Page<>(deviceLog.getPageNum(), deviceLog.getPageSize());
+// page.setRecords(deviceLogList);
+// page.setTotal(total);
+// return page;
+ }
+
+ private void setDeviceLog(List<DeviceLog> deviceLogList, FluxRecord record, DeviceLog log) {
+ log.setLogId((Long) record.getValueByKey("logId"));
+ log.setLogType(((Number) Objects.requireNonNull(record.getValueByKey("logType"))).intValue());
+ log.setLogValue((String) record.getValueByKey("logValue"));
+ log.setDeviceId((Long) record.getValueByKey("deviceId"));
+ log.setDeviceName((String) record.getValueByKey("deviceName"));
+ log.setSerialNumber((String) record.getValueByKey("serialNumber"));
+ log.setIdentify((String) record.getValueByKey("identify"));
+ log.setCreateBy((String) record.getValueByKey("createBy"));
+ log.setIsMonitor(((Number) Objects.requireNonNull(record.getValueByKey("isMonitor"))).intValue());
+ log.setMode(((Number) Objects.requireNonNull(record.getValueByKey("mode"))).intValue());
+ log.setCreateTime(Date.from(Objects.requireNonNull(record.getTime())));
+ log.setRemark((String) record.getValueByKey("remark"));
+ log.setUserId((Long) record.getValueByKey("userId"));
+ log.setUserName((String) record.getValueByKey("userName"));
+ log.setTenantId((Long) record.getValueByKey("tenantId"));
+ log.setTenantName((String) record.getValueByKey("tenantName"));
+ log.setModelName((String) record.getValueByKey("modelName"));
+ deviceLogList.add(log);
+ }
+
+
+// @Override
+// public Page<DeviceLog> selectEventLogList(DeviceLog deviceLog) {
+// // event-log time filter: the range arrives in the params map as yyyy-MM-dd; the HH:mm:ss part has to be appended by hand
+// if (deviceLog.getParams().get("beginTime") != null && deviceLog.getParams().get("beginTime") != "" && deviceLog.getParams().get("endTime") != null && deviceLog.getParams().get("endTime") != "") {
+// String beginTime = deviceLog.getParams().get("beginTime").toString();
+// String endTime = deviceLog.getParams().get("endTime").toString();
+// beginTime = beginTime + " 00:00:00";
+// endTime = endTime + " 23:59:59";
+// deviceLog.setBeginTime(beginTime);
+// deviceLog.setEndTime(endTime);
+// }
+// QueryApi queryApi = influxDBClient.getQueryApi();
+//
+// StringBuilder fluxQuery = new StringBuilder();
+// fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\") ");
+//
+// // handle the time range
+// if (deviceLog.getBeginTime() != null && !deviceLog.getBeginTime().isEmpty()
+// && deviceLog.getEndTime() != null && !deviceLog.getEndTime().isEmpty()) {
+// SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+// try {
+// Date beginDate = sdf.parse(deviceLog.getBeginTime());
+// Date endDate = sdf.parse(deviceLog.getEndTime());
+// // convert to RFC3339 time strings
+// String startRFC3339 = beginDate.toInstant().toString();
+// String stopRFC3339 = endDate.toInstant().toString();
+//
+// fluxQuery.append("|> range(start: ")
+// .append(startRFC3339)
+// .append(", stop: ")
+// .append(stopRFC3339)
+// .append(") ");
+// } catch (ParseException e) {
+// e.printStackTrace();
+// // if parsing fails, fall back to the default (unbounded) range
+// fluxQuery.append("|> range(start: 0) ");
+// }
+// } else {
+// fluxQuery.append("|> range(start: 0) ");
+// }
+//
+// fluxQuery.append("|> filter(fn: (r) => r._measurement == \"").append(influxConfig.getMeasurement()).append("\") ");
+//
+// // append pivot and pagination to the original query
+// fluxQuery.append("|>
pivot(\n" + +// " rowKey:[\"_time\"], \n" + +// " columnKey: [\"_field\"], \n" + +// " valueColumn: \"_value\"\n" + +// " )"); +// +// List filterConditions = new ArrayList<>(); +// if (deviceLog.getIsMonitor() != null) { +// filterConditions.add("r.isMonitor == " + deviceLog.getIsMonitor()); +// } +// if (deviceLog.getLogType() != null) { +// filterConditions.add("r.logType == " + deviceLog.getLogType()); +// } else { +// filterConditions.add("r.logType != 1 and r.logType != 2 and r.logType != 4 and r.logType != 7"); +// } +// if (deviceLog.getSerialNumber() != null && !deviceLog.getSerialNumber().isEmpty()) { +// filterConditions.add("r.serialNumber == \"" + deviceLog.getSerialNumber() + "\""); +// } +// if (deviceLog.getIdentify() != null && !deviceLog.getIdentify().isEmpty()) { +// filterConditions.add("r.identify =~ /.*" + deviceLog.getIdentify() + ".*/"); +// } +// +// fluxQuery.append("|> filter(fn: (r) => "); +// for (int i = 0; i < filterConditions.size(); i++) { +// if (i > 0) { +// fluxQuery.append(" and "); +// } +// fluxQuery.append(filterConditions.get(i)); +// } +// fluxQuery.append(") "); +// fluxQuery.append("|> sort(columns: [\"_time\"], desc: true)") +// .append("|> group()"); +// +// // 计算偏移量 +// int pageNum = deviceLog.getPageNum(); +// int pageSize = deviceLog.getPageSize(); +// int offset = (pageNum - 1) * pageSize; +// // 添加分页查询 +// StringBuilder originalQuery = new StringBuilder(fluxQuery); +// originalQuery.append("|> limit(n: ").append(pageSize).append(", offset: ").append(offset).append(")"); +// +// List tables = queryApi.query(originalQuery.toString()); +// System.out.println("EventList查询Flux语句:" + originalQuery); +// +// List deviceLogList = new ArrayList<>(); +// for (FluxTable table : tables) { +// for (FluxRecord record : table.getRecords()) { +// DeviceLog log = new DeviceLog(); +// setDeviceLog(deviceLogList, record, log); +// } +// } +// +// // 注意:由于使用了 limit 和 offset,这里无法直接获取总记录数,需要额外查询 +// List countTables = queryApi.query(fluxQuery.toString()); +// System.out.println("分页查询Flux语句:" + fluxQuery); +// long total = 0; +// if (!countTables.isEmpty() && !countTables.get(0).getRecords().isEmpty()) { +// total = (long) countTables.get(0).getRecords().size(); +// } +// +// Page page = new Page<>(deviceLog.getPageNum(), deviceLog.getPageSize()); +// page.setRecords(deviceLogList); +// page.setTotal(total); +// return page; +// } + + @Override + public List selectMonitorList(DeviceLog deviceLog) { + QueryApi queryApi = influxDBClient.getQueryApi(); + StringBuilder fluxQuery = new StringBuilder(); + fluxQuery.append("from(bucket: \"").append(influxConfig.getBucket()).append("\") "); + + // 处理时间范围 + if (deviceLog.getBeginTime() != null && !deviceLog.getBeginTime().isEmpty() + && deviceLog.getEndTime() != null && !deviceLog.getEndTime().isEmpty()) { + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + try { + Date beginDate = sdf.parse(deviceLog.getBeginTime()); + Date endDate = sdf.parse(deviceLog.getEndTime()); + // 转换为RFC3339格式时间字符串 + String startRFC3339 = beginDate.toInstant().toString(); + String stopRFC3339 = endDate.toInstant().toString(); + + fluxQuery.append("|> range(start: ") + .append(startRFC3339) + .append(", stop: ") + .append(stopRFC3339) + .append(") "); + } catch (ParseException e) { + e.printStackTrace(); + // 若解析失败,可使用默认时间范围 + fluxQuery.append("|> range(start: 0) "); + } + } else { + fluxQuery.append("|> range(start: 0) "); + } + + fluxQuery.append("|> filter(fn: (r) => r._measurement == 
\"").append(influxConfig.getMeasurement()).append("\") "); + fluxQuery.append("|> pivot(\n" + + " rowKey:[\"_time\"], \n" + + " columnKey: [\"_field\"], \n" + + " valueColumn: \"_value\"\n" + + " )"); + fluxQuery.append("|> filter(fn: (r) => r.isMonitor == 1) "); + + List filterConditions = new ArrayList<>(); + if (deviceLog.getSerialNumber() != null && !deviceLog.getSerialNumber().isEmpty()) { + filterConditions.add("r.serialNumber == \"" + deviceLog.getSerialNumber() + "\""); + } + if (deviceLog.getIdentify() != null && !deviceLog.getIdentify().isEmpty()) { + filterConditions.add("r.identify =~ /.*" + deviceLog.getIdentify() + ".*/"); + } + + if (!filterConditions.isEmpty()) { + fluxQuery.append("|> filter(fn: (r) => "); + for (int i = 0; i < filterConditions.size(); i++) { + if (i > 0) { + fluxQuery.append(" and "); + } + fluxQuery.append(filterConditions.get(i)); + } + fluxQuery.append(") "); + } + + fluxQuery.append("|> sort(columns: [\"_time\"], desc: true) "); + fluxQuery.append("|> keep(columns: [\"_value\", \"_time\"]) "); + + List tables = queryApi.query(fluxQuery.toString()); + + List monitorList = new ArrayList<>(); + for (FluxTable table : tables) { + for (FluxRecord record : table.getRecords()) { + MonitorModel model = new MonitorModel(); + model.setValue((String) record.getValue()); + model.setTime(new Date(record.getTime().getEpochSecond() * 1000)); + monitorList.add(model); + } + } + return monitorList; + } + +} diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/IotDbLogService.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/IotDbLogService.java new file mode 100644 index 00000000..3f80b19e --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/IotDbLogService.java @@ -0,0 +1,120 @@ +package com.fastbee.iot.tsdb.service.impl; + +import com.baomidou.dynamic.datasource.annotation.DS; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.fastbee.iot.domain.Device; +import com.fastbee.iot.domain.DeviceLog; +import com.fastbee.iot.mapper.IotDbLogMapper; +import com.fastbee.iot.model.DeviceStatistic; +import com.fastbee.iot.model.HistoryModel; +import com.fastbee.iot.model.MonitorModel; +import com.fastbee.iot.tsdb.service.ILogService; +import com.fastbee.iot.tsdb.model.TdLogDto; +import lombok.extern.slf4j.Slf4j; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Primary; +import org.springframework.stereotype.Service; + +import javax.annotation.Resource; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +@Slf4j +@Primary +@ConditionalOnProperty(name = "spring.datasource.dynamic.datasource.iotdb.enabled", havingValue = "true") +@DS("iotdb") +@Service("IotDB") +public class IotDbLogService implements ILogService { + + @Resource + private IotDbLogMapper iotDbLogMapper; + + + @Override + public int createSTable(String database) { + Long count = iotDbLogMapper.countDB(database); + if (count == 0) { + iotDbLogMapper.createDB(database); + } + return 1; + } + + @Override + public int saveDeviceLog(DeviceLog deviceLog) { + return iotDbLogMapper.save(deviceLog); + } + + @Override + public int saveBatch(TdLogDto dto) { + int ret = 0; + for (DeviceLog deviceLog : dto.getList()) { + ret += this.saveDeviceLog(deviceLog); + } + return ret; + } + + @Override + public int 
diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/IotDbLogService.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/IotDbLogService.java new file mode 100644 index 00000000..3f80b19e --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/IotDbLogService.java @@ -0,0 +1,120 @@
+package com.fastbee.iot.tsdb.service.impl;
+
+import com.baomidou.dynamic.datasource.annotation.DS;
+import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
+import com.fastbee.iot.domain.Device;
+import com.fastbee.iot.domain.DeviceLog;
+import com.fastbee.iot.mapper.IotDbLogMapper;
+import com.fastbee.iot.model.DeviceStatistic;
+import com.fastbee.iot.model.HistoryModel;
+import com.fastbee.iot.model.MonitorModel;
+import com.fastbee.iot.tsdb.service.ILogService;
+import com.fastbee.iot.tsdb.model.TdLogDto;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Primary;
+import org.springframework.stereotype.Service;
+
+import javax.annotation.Resource;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+@Slf4j
+@Primary
+@ConditionalOnProperty(name = "spring.datasource.dynamic.datasource.iotdb.enabled", havingValue = "true")
+@DS("iotdb")
+@Service("IotDB")
+public class IotDbLogService implements ILogService {
+
+ @Resource
+ private IotDbLogMapper iotDbLogMapper;
+
+
+ @Override
+ public int createSTable(String database) {
+ Long count = iotDbLogMapper.countDB(database);
+ // guard against null before unboxing
+ if (count == null || count == 0) {
+ iotDbLogMapper.createDB(database);
+ }
+ return 1;
+ }
+
+ @Override
+ public int saveDeviceLog(DeviceLog deviceLog) {
+ return iotDbLogMapper.save(deviceLog);
+ }
+
+ @Override
+ public int saveBatch(TdLogDto dto) {
+ int ret = 0;
+ for (DeviceLog deviceLog : dto.getList()) {
+ ret += this.saveDeviceLog(deviceLog);
+ }
+ return ret;
+ }
+
+ @Override
+ public int deleteDeviceLogByDeviceNumber(String deviceNumber) {
+ return iotDbLogMapper.deleteDeviceLogByDeviceNumber(deviceNumber);
+ }
+
+ @Override
+ public DeviceStatistic selectCategoryLogCount(Device device) {
+ DeviceStatistic statistic = new DeviceStatistic();
+ Long property = iotDbLogMapper.selectPropertyLogCount(device);
+ Long event = iotDbLogMapper.selectEventLogCount(device);
+ Long monitor = iotDbLogMapper.selectMonitorLogCount(device);
+ statistic.setPropertyCount(property == null ? 0 : property);
+ statistic.setEventCount(event == null ? 0 : event);
+ statistic.setMonitorCount(monitor == null ? 0 : monitor);
+ return statistic;
+ }
+
+ @Override
+ public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog) {
+ if (deviceLog.getIdentify() != null) {
+ deviceLog.setIdentify("%" + deviceLog.getIdentify() + "%");
+ }
+ return iotDbLogMapper.selectDeviceLogList(deviceLog);
+ }
+
+// @Override
+// public Page<DeviceLog> selectEventLogList(DeviceLog deviceLog) {
+// if (deviceLog.getParams().get("beginTime") != null && deviceLog.getParams().get("beginTime") != "" && deviceLog.getParams().get("endTime") != null && deviceLog.getParams().get("endTime") != "") {
+// String beginTime = deviceLog.getParams().get("beginTime").toString();
+// String endTime = deviceLog.getParams().get("endTime").toString();
+// beginTime = beginTime + " 00:00:00";
+// endTime = endTime + " 23:59:59";
+// deviceLog.setBeginTime(beginTime);
+// deviceLog.setEndTime(endTime);
+// }
+// if (deviceLog.getIdentify() != null) {
+// deviceLog.setIdentify("%" + deviceLog.getIdentify() + "%");
+// }
+// // fetch the full result set
+// List<DeviceLog> allLogs = iotDbLogMapper.selectEventLogList(deviceLog);
+//
+// // paginate by hand
+// int pageSize = deviceLog.getPageSize();
+// int pageNum = deviceLog.getPageNum();
+// int start = (pageNum - 1) * pageSize;
+// int end = Math.min(start + pageSize, allLogs.size());
+//
+// // build the MyBatis-Plus page object
+// Page<DeviceLog> page = new Page<>(pageNum, pageSize);
+// page.setRecords(allLogs.subList(start, end));
+// page.setTotal(allLogs.size());
+//
+// return page;
+// }
+
+ @Override
+ public List<MonitorModel> selectMonitorList(DeviceLog deviceLog) {
+ if (deviceLog.getIdentify() != null) {
+ deviceLog.setIdentify("%" + deviceLog.getIdentify() + "%");
+ }
+ return iotDbLogMapper.selectMonitorList(deviceLog);
+ }
+
+}
diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/MySqlLogServiceImpl.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/MySqlLogServiceImpl.java new file mode 100644 index 00000000..5b8c3cfb --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/MySqlLogServiceImpl.java @@ -0,0 +1,109 @@
+package com.fastbee.iot.tsdb.service.impl;
+
+import com.fastbee.common.utils.DateUtils;
+import com.fastbee.iot.domain.Device;
+import com.fastbee.iot.domain.DeviceLog;
+import com.fastbee.iot.domain.EventLog;
+import com.fastbee.iot.mapper.EventLogMapper;
+import com.fastbee.iot.model.DeviceStatistic;
+import com.fastbee.iot.tsdb.model.TdLogDto;
+import com.fastbee.iot.tsdb.service.ILogService;
+import com.fastbee.iot.mapper.DeviceLogMapper;
+import com.fastbee.iot.model.MonitorModel;
+import org.springframework.stereotype.Service;
+
+import javax.annotation.Resource;
+import java.util.List;
+
+@Service
+public class MySqlLogServiceImpl implements ILogService {
+
+ private DeviceLogMapper deviceLogMapper;
+ @Resource
+ private EventLogMapper eventLogMapper;
+
+ public
MySqlLogServiceImpl(DeviceLogMapper _deviceLogMapper){
+ this.deviceLogMapper = _deviceLogMapper;
+ }
+
+ @Override
+ public int createSTable(String database) {
+ // plain MySQL storage has no super table to create
+ return 0;
+ }
+
+ /***
+ * insert a device log
+ * @return
+ */
+ @Override
+ public int saveDeviceLog(DeviceLog deviceLog) {
+ if (deviceLog.getLogType() == 3 || deviceLog.getLogType() == 5 || deviceLog.getLogType() == 6 || deviceLog.getLogType() == 8) {
+ EventLog event = new EventLog();
+ event.setDeviceId(deviceLog.getDeviceId());
+ event.setDeviceName(deviceLog.getDeviceName());
+ event.setSerialNumber(deviceLog.getSerialNumber());
+ event.setIsMonitor(0);
+ event.setUserId(deviceLog.getTenantId());
+ event.setUserName(deviceLog.getTenantName());
+ event.setTenantId(deviceLog.getTenantId());
+ event.setTenantName(deviceLog.getTenantName());
+ event.setCreateTime(DateUtils.getNowDate());
+ event.setCreateBy(deviceLog.getCreateBy());
+ // log mode: 1=shadow, 2=online, 3=other
+ event.setMode(3);
+ event.setLogValue(deviceLog.getLogValue());
+ event.setRemark(deviceLog.getRemark());
+ event.setIdentify(deviceLog.getIdentify());
+ event.setLogType(deviceLog.getLogType());
+ return eventLogMapper.insertEventLog(event);
+ } else {
+ return deviceLogMapper.insertDeviceLog(deviceLog);
+ }
+ }
+
+ @Override
+ public int saveBatch(TdLogDto dto) {
+ int ret = 0;
+ for (DeviceLog deviceLog : dto.getList()) {
+ ret += this.saveDeviceLog(deviceLog);
+ }
+ return ret;
+ }
+
+ /***
+ * delete device logs by device serial number
+ * @return
+ */
+ @Override
+ public int deleteDeviceLogByDeviceNumber(String deviceNumber) {
+ return deviceLogMapper.deleteDeviceLogByDeviceNumber(deviceNumber);
+ }
+
+ /***
+ * totals for device properties, functions, events and monitor data
+ * @return
+ */
+ @Override
+ public DeviceStatistic selectCategoryLogCount(Device device){
+ return deviceLogMapper.selectCategoryLogCount(device);
+ }
+
+
+ /***
+ * monitor data list
+ * @return
+ */
+ @Override
+ public List<MonitorModel> selectMonitorList(DeviceLog deviceLog) {
+ return deviceLogMapper.selectMonitorList(deviceLog);
+ }
+
+ /***
+ * log list
+ * @return
+ */
+ @Override
+ public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog) {
+ return deviceLogMapper.selectDeviceLogList(deviceLog);
+ }
+}
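The magic numbers routed to the event-log table above recur throughout this diff; collected for reference as a sketch (the enum and its names are hypothetical, inferred from this PR, not a type that exists in the codebase):

```java
// Log type codes observed in this diff; 3/5/6/8 are stored as event logs.
enum LogType {
    PROPERTY(1),   // property report (isMonitor=1 marks monitor data)
    EVENT(3),      // thing-model event
    ONLINE(5),     // device came online
    OFFLINE(6),    // device went offline
    DISABLED(8);   // device was disabled

    final int code;
    LogType(int code) { this.code = code; }
}
```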
diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/TdengineLogServiceImpl.java b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/TdengineLogServiceImpl.java new file mode 100644 index 00000000..166e83c1 --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/java/com/fastbee/iot/tsdb/service/impl/TdengineLogServiceImpl.java @@ -0,0 +1,126 @@
+package com.fastbee.iot.tsdb.service.impl;
+
+import com.baomidou.dynamic.datasource.annotation.DS;
+import com.fastbee.iot.domain.Device;
+import com.fastbee.iot.domain.DeviceLog;
+import com.fastbee.iot.model.DeviceStatistic;
+import com.fastbee.iot.tsdb.service.ILogService;
+import com.fastbee.iot.model.MonitorModel;
+import com.fastbee.iot.mapper.TDDeviceLogMapper;
+import com.fastbee.iot.tsdb.model.TdLogDto;
+import com.fastbee.iot.util.SnowflakeIdWorker;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Primary;
+import org.springframework.stereotype.Service;
+import java.util.List;
+
+/**
+ * Class: TdengineLogServiceImpl
+ * Description: TDengine log storage implementation
+ * Date: 2022/5/22 13:38
+ * Author: admin
+ */
+@Slf4j
+@Primary
+@ConditionalOnProperty(name = "spring.datasource.dynamic.datasource.taos.enabled", havingValue = "true")
+@DS("taos")
+@Service("Tdengine")
+public class TdengineLogServiceImpl implements ILogService {
+
+ @Autowired
+ private TDDeviceLogMapper tDDeviceLogMapper;
+
+
+ private SnowflakeIdWorker snowflakeIdWorker = new SnowflakeIdWorker(1);
+
+ @Value("${spring.datasource.dynamic.datasource.taos.dbName}")
+ private String dbName;
+
+
+ @Override
+ public int createSTable(String database) {
+ return tDDeviceLogMapper.createSTable(database);
+ }
+
+ /***
+ * insert a device log
+ * @return
+ */
+ @Override
+ public int saveDeviceLog(DeviceLog deviceLog) {
+ long logId = snowflakeIdWorker.nextId();
+ deviceLog.setLogId(logId);
+ return tDDeviceLogMapper.save(dbName, deviceLog);
+ }
+
+ /**
+ * batch-save device logs
+ */
+ @Override
+ public int saveBatch(TdLogDto dto) {
+ return tDDeviceLogMapper.saveBatch(dbName, dto);
+ }
+
+ /***
+ * totals for device properties, functions, events and monitor data
+ * @return
+ */
+ @Override
+ public DeviceStatistic selectCategoryLogCount(Device device) {
+ DeviceStatistic statistic = new DeviceStatistic();
+ Long property = tDDeviceLogMapper.selectPropertyLogCount(dbName, device);
+ Long event = tDDeviceLogMapper.selectEventLogCount(dbName, device);
+ Long monitor = tDDeviceLogMapper.selectMonitorLogCount(dbName, device);
+ statistic.setPropertyCount(property == null ? 0 : property);
+ statistic.setEventCount(event == null ? 0 : event);
+ statistic.setMonitorCount(monitor == null ? 0 : monitor);
+ return statistic;
+ }
+
+ /***
+ * log list
+ * @return
+ */
+ @Override
+ public List<DeviceLog> selectDeviceLogList(DeviceLog deviceLog) {
+ return tDDeviceLogMapper.selectDeviceLogList(dbName, deviceLog);
+ }
+
+// @Override
+// public Page<DeviceLog> selectEventLogList(DeviceLog deviceLog) {
+// if (deviceLog.getParams().get("beginTime") != null && deviceLog.getParams().get("beginTime") != "" && deviceLog.getParams().get("endTime") != null && deviceLog.getParams().get("endTime") != "") {
+// String beginTime = deviceLog.getParams().get("beginTime").toString();
+// String endTime = deviceLog.getParams().get("endTime").toString();
+// beginTime = beginTime + " 00:00:00";
+// endTime = endTime + " 23:59:59";
+// deviceLog.setBeginTime(beginTime);
+// deviceLog.setEndTime(endTime);
+// }
+// return tDDeviceLogMapper.selectEventLogList(new Page<>(deviceLog.getPageNum(), deviceLog.getPageSize()), dbName, deviceLog);
+// }
+
+ /***
+ * monitor data list
+ * @return
+ */
+ @Override
+ public List<MonitorModel> selectMonitorList(DeviceLog deviceLog) {
+ if (deviceLog.getIdentify() != null) {
+ deviceLog.setIdentify("%" + deviceLog.getIdentify() + "%");
+ }
+ return tDDeviceLogMapper.selectMonitorList(dbName, deviceLog);
+ }
+
+ /***
+ * delete device logs by device serial number
+ * @return
+ */
+ @Override
+ public int deleteDeviceLogByDeviceNumber(String deviceNumber) {
+ return tDDeviceLogMapper.deleteDeviceLogByDeviceNumber(dbName, deviceNumber);
+ }
+
+}
diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/DeviceLogMapper.xml b/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/DeviceLogMapper.xml index 88eb821a..816379b2 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/DeviceLogMapper.xml +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/DeviceLogMapper.xml @@ -16,7 +16,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" - + @@ -31,7 +31,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" - + @@ -42,7 +42,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" select log_value,
create_time from iot_device_log and is_monitor=1 - and identify = #{identity} + and identify = #{identify} and device_id = #{deviceId} and serial_number = #{serialNumber} and create_time between #{beginTime} and #{endTime} @@ -94,6 +94,45 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" + + insert into iot_device_log + + log_type, + log_value, + device_id, + device_name, + serial_number, + identify, + create_by, + is_monitor, + mode, + create_time, + remark, + user_id, + user_name, + tenant_id, + tenant_name, + model_name, + + + #{logType}, + #{logValue}, + #{deviceId}, + #{deviceName}, + #{serialNumber}, + #{identify}, + #{createBy}, + #{isMonitor}, + #{mode}, + #{createTime}, + #{remark}, + #{userId}, + #{userName}, + #{tenantId}, + #{tenantName}, + #{modelName}, + + insert into iot_device_log (log_type,log_value,device_id,device_name,serial_number,identify,create_by, @@ -101,7 +140,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" values (#{item.logType},#{item.logValue},#{item.deviceId},#{item.deviceName},#{item.serialNumber}, - #{item.identity},#{item.createBy},#{item.isMonitor},#{item.mode},#{item.createTime},#{item.remark}, + #{item.identify},#{item.createBy},#{item.isMonitor},#{item.mode},#{item.createTime},#{item.remark}, #{item.userId},#{item.userName},#{item.tenantId},#{item.tenantName},#{item.modelName}) @@ -114,7 +153,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" device_id = #{deviceId}, device_name = #{deviceName}, serial_number = #{serialNumber}, - identify = #{identity}, + identify = #{identify}, create_by = #{createBy}, is_monitor = #{isMonitor}, mode = #{mode}, @@ -150,7 +189,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" and device_id = #{deviceId} and serial_number = #{serialNumber} and log_type = #{logType} - and identity like concat('%', #{identity}, '%') + and identify like concat('%', #{identify}, '%') order by create_time desc diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/EventLogMapper.xml b/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/EventLogMapper.xml index 896a7c32..f7ecba41 100644 --- a/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/EventLogMapper.xml +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/EventLogMapper.xml @@ -6,7 +6,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" - + @@ -31,7 +31,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" + count databases ${database} + + + + + INSERT INTO root.ln.device_log ( + + log_type + , log_value + , device_id + , device_name + ,serial_number + , identify + , create_by + , is_monitor + , mode + , remark + , tenant_id + + ) VALUES ( + + #{logType} + , #{logValue} + , #{deviceId} + , #{deviceName} + ,#{serialNumber} + , #{identify} + , #{createBy} + , #{isMonitor} + , #{mode} + , #{remark} + , #{tenantId} + + ) + + + + DELETE FROM root.ln.device_log.** + + + + + + + + + + + + + + + + + + + diff --git a/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/TDDeviceLogMapper.xml b/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/TDDeviceLogMapper.xml new file mode 100644 index 00000000..9e5a29c0 --- /dev/null +++ b/springboot/fastbee-service/fastbee-iot-service/src/main/resources/mapper/iot/TDDeviceLogMapper.xml @@ -0,0 +1,176 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + create database if not exists ${database} vgroups 4; + + + + create STABLE if not exists ${database}.device_log + (ts 
timestamp,
+ log_value BINARY(100),
+ is_monitor TINYINT,
+ log_type TINYINT,
+ identify BINARY(100),
+ mode TINYINT,
+ remark BINARY(500),
+ tenant_id BIGINT,
+ create_by BINARY(50))
+ TAGS(serial_number BINARY(50));
+
+
+ insert into ${database}.device_${device.serialNumber} using device_log
+ tags (#{device.serialNumber})
+ values (
+
+ #{device.ts},
+
+
+ now,
+
+ #{device.logValue},
+ #{device.isMonitor},
+ #{device.logType},
+ #{device.identify},
+ #{device.mode},
+ #{device.remark},
+ #{device.tenantId},
+ #{device.createBy});
+
+
+ insert into ${database}.device_${data.serialNumber} using device_log
+ tags (#{data.serialNumber})
+ values
+
+ (now,
+ #{data.logValue},
+ #{data.isMonitor},
+ #{data.logType},
+ #{data.identify},
+ #{data.mode},
+ #{data.remark},
+ #{data.tenantId},
+ #{data.createBy})
+
+
+
+
+ DROP TABLE IF EXISTS ${database}.device_${serialNumber};
+ + + + + + + + + + + + + + + + +
diff --git a/springboot/fastbee-service/fastbee-system-service/src/main/resources/mapper/system/SysUserMapper.xml b/springboot/fastbee-service/fastbee-system-service/src/main/resources/mapper/system/SysUserMapper.xml index a627f525..044b1d45 100644 --- a/springboot/fastbee-service/fastbee-system-service/src/main/resources/mapper/system/SysUserMapper.xml +++ b/springboot/fastbee-service/fastbee-system-service/src/main/resources/mapper/system/SysUserMapper.xml @@ -170,7 +170,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" #{status}, #{createBy}, #{remark},
- sysdate()
+ current_timestamp
) @@ -189,7 +189,7 @@ PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" login_date = #{loginDate}, update_by = #{updateBy}, remark = #{remark},
- update_time = sysdate()
+ update_time = current_timestamp
where user_id = #{userId}
diff --git a/springboot/pom.xml b/springboot/pom.xml index 930599ea..c3ef4252 100644 --- a/springboot/pom.xml +++ b/springboot/pom.xml @@ -42,7 +42,8 @@ 3.5.3.1 3.5.3.1 4.3.1
- 2.0.38
+ 3.4.0
+ 1.3.3
32.0.1-jre 2.2.3 3.3.1
diff --git a/springboot/sql/iotdb/iotdb.sql b/springboot/sql/iotdb/iotdb.sql new file mode 100644 index 00000000..a092ba31 --- /dev/null +++ b/springboot/sql/iotdb/iotdb.sql @@ -0,0 +1,64 @@
+
+create database root.ln
+DELETE database root.ln
+
+count databases root.ln
+// create timeseries
+CREATE timeseries root.ln.device_log.log_type WITH DATATYPE=INT32, ENCODING=RLE;
+CREATE timeseries root.ln.device_log.log_value WITH DATATYPE=TEXT, ENCODING=PLAIN;
+CREATE timeseries root.ln.device_log.device_id WITH DATATYPE=INT64, ENCODING=RLE;
+CREATE timeseries root.ln.device_log.device_name WITH DATATYPE=TEXT, ENCODING=PLAIN;
+CREATE timeseries root.ln.device_log.serial_number WITH DATATYPE=TEXT, ENCODING=PLAIN;
+CREATE timeseries root.ln.device_log.identify WITH DATATYPE=TEXT, ENCODING=PLAIN;
+CREATE timeseries root.ln.device_log.create_by WITH DATATYPE=TEXT, ENCODING=PLAIN;
+CREATE timeseries root.ln.device_log.is_monitor WITH DATATYPE=INT32, ENCODING=RLE;
+CREATE timeseries root.ln.device_log.mode WITH DATATYPE=INT32, ENCODING=RLE;
+CREATE timeseries root.ln.device_log.tenant_id WITH DATATYPE=INT32, ENCODING=RLE;
+CREATE timeseries root.ln.device_log.remark WITH DATATYPE=TEXT, ENCODING=PLAIN;
+
+-- // delete timeseries
+-- delete timeseries root.ln.device_log.**
+-- drop timeseries root.ln.device_log.**
+--
+-- // set / unset TTL
+-- set ttl to root.ln 360000
+-- unset ttl from root.ln
+--
+-- // create device templates
+-- create device template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY)
+-- create device template t2 aligned (lat FLOAT encoding=Gorilla, lon
FLOAT encoding=Gorilla)

+-- set device template t1 to root.device_log.sn1
+-- set device template t2 to root.device_log.sn2
+
+-- create timeseries using device template on root.device_log.sn1
+-- create timeseries using device template on root.device_log.sn2
+
+-- // unbind device templates
+-- delete timeseries of device template t1 from root.device_log.sn1
+-- deactivate device template t1 from root.device_log.sn1
+-- unset device template t1 from root.device_log.sn1
+
+-- // drop device templates
+-- drop device template t1
+
+-- // inspect device templates
+-- show device templates
+-- show nodes in device template t1
+-- show nodes in device template t2
+-- show paths set device template t1
+-- show paths using device template t1
+
+-- // insert data
+-- insert into root.device_log.sn3(tenant_id,device_id,device_name,log_type,log_value,identify,is_monitor,mode,model_name,remark,create_by)
+-- values(1,1,'device1',1,'100','1',1,1,'device1','remark','admin')
+--
+-- insert into root.device_log.sn4(tenant_id,device_id,device_name,log_type,log_value,identify,is_monitor,mode,model_name,remark,create_by)
+-- values(1,1,'device2',1,'100','1',1,1,'device2','remark','admin');
+-- insert into root.device_log.sn4(tenant_id,device_id,device_name,log_type,log_value,identify,is_monitor,mode,model_name,remark,create_by)
+-- values(1,1,'device2',1,'101','1',1,1,'device2','remark','admin')
+
+-- // query the inserted data
+-- select * from root.device_log.sn3
+-- select * from root.device_log.sn4
+-- select * from root.device_log.D1ELV3A5TOJS
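To try the statements above by hand, the IoTDB JDBC driver pulled in by the pom.xml change (iotdb 1.3.3) can execute them directly. A minimal sketch, assuming the dockerized instance's default port 6667 and default root/root credentials; the path and values are taken from the sample file:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.Statement;

public class IotDbSmokeTest {
    public static void main(String[] args) throws Exception {
        Class.forName("org.apache.iotdb.jdbc.IoTDBDriver");
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:iotdb://127.0.0.1:6667/", "root", "root");
             Statement stmt = conn.createStatement()) {
            // insert a row (missing timeseries are auto-created when schema
            // auto-creation is enabled on the server)
            stmt.execute("insert into root.ln.device_log.sn3(log_type, log_value) values(1, '100')");
            // read it back, printing column labels generically since IoTDB
            // returns full timeseries paths as column names
            try (ResultSet rs = stmt.executeQuery("select * from root.ln.device_log.sn3")) {
                ResultSetMetaData md = rs.getMetaData();
                while (rs.next()) {
                    StringBuilder row = new StringBuilder();
                    for (int i = 1; i <= md.getColumnCount(); i++) {
                        row.append(md.getColumnLabel(i)).append('=')
                           .append(rs.getString(i)).append(' ');
                    }
                    System.out.println(row);
                }
            }
        }
    }
}
```

Note the scratch file mixes `root.ln.device_log` (used by the mapper) with `root.device_log.sn3`; the snippet above follows the mapper's `root.ln` prefix.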