更新 日志

This commit is contained in:
caixiang 2021-09-25 14:25:08 +08:00
parent 5c20deaae1
commit 303664eda3
8 changed files with 295 additions and 58 deletions

View File

@ -8,6 +8,11 @@ For further reference, please consider the following sections:
* [Spring Boot Maven Plugin Reference Guide](https://docs.spring.io/spring-boot/docs/2.5.4/maven-plugin/reference/html/)
* [Create an OCI image](https://docs.spring.io/spring-boot/docs/2.5.4/maven-plugin/reference/html/#build-image)
系统相关
日志问题
① 一般系统的 debug模式 只有在开发调试阶段才会开启,生产环境是不开的。开启==》application.yml 下的logging 注释打开 ;;关闭 ==》application.yml 下的logging代码打上注释。
② work目录下日志一般是汇总的所有模块的日志包括所有模块依赖的第三方jar包输出的日志其他目录下的日志是手动用代码输出的日志
MQ 相关
1.src/main/java/com/qgs/dc/mq/configuration 包下新建 配置类。主要的作用的是在 MQ上新建队列如果队列信息不存在的话
水平扩展其他设备的时候只要 Ctrl+R打开替换然后输入 00B=>00C再点 Replace All 即可

27
pom.xml
View File

@ -131,6 +131,33 @@
<!-- jackson依赖 结束 -->
<!-- 西门子S7 驱动 开始 -->
<dependency>
<groupId>org.apache.plc4x</groupId>
<artifactId>plc4j-api</artifactId>
<version>0.9.0</version>
</dependency>
<dependency>
<groupId>org.apache.plc4x</groupId>
<artifactId>plc4j-connection-cache</artifactId>
<version>0.9.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.plc4x/plc4j-connection-pool -->
<dependency>
<groupId>org.apache.plc4x</groupId>
<artifactId>plc4j-connection-pool</artifactId>
<version>0.9.0</version>
</dependency>
<dependency>
<groupId>org.apache.plc4x</groupId>
<artifactId>plc4j-driver-s7</artifactId>
<version>0.9.0</version>
<scope>runtime</scope>
</dependency>
<!-- 西门子S7 驱动 结束 -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>

View File

@ -7,7 +7,7 @@ public class NodeIdKey {
private String nodeName;
private Integer namespace;
private String identifier;
private List<NodeIdKey> child;
private List<NodeIdKey> children;
/**
* type 类型 (就是Node)
@ -43,7 +43,7 @@ public class NodeIdKey {
this.namespace = namespace;
this.identifier = identifier;
this.varType = varType;
this.child = new ArrayList<>();
this.children = new ArrayList<>();
}
public Integer getNodeType() {
@ -62,12 +62,12 @@ public class NodeIdKey {
this.varType = varType;
}
public List<NodeIdKey> getChild() {
return child;
public List<NodeIdKey> getChildren() {
return children;
}
public void setChild(List<NodeIdKey> child) {
this.child = child;
public void setChildren(List<NodeIdKey> children) {
this.children = children;
}

View File

@ -1338,7 +1338,7 @@ public class UAService {
NodeId rootNodeId = new NodeId(rootNameSpace,identifier);
String rootName = rootNameSpace+","+identifier;
NodeIdKey rootNodeIdKey = new NodeIdKey(rootName, NodeClass.Object.getValue(), rootNameSpace, identifier,"根目录");
rootNodeIdKey.setChild(browseNodeA(rootNodeIdKey, opcUaClients.get(plcName), rootNodeId));
rootNodeIdKey.setChildren(browseNodeA(rootNodeIdKey, opcUaClients.get(plcName), rootNodeId));
return rootNodeIdKey;
}
@ -1381,7 +1381,7 @@ public class UAService {
//NodeIdKey nodeIdKey = new NodeIdKey(browName,66,Integer.parseInt(String.valueOf(browseRoot.getNamespaceIndex())),browseRoot.getIdentifier().toString());
// 目录也是一个NodeId所以要递归的去遍历看看是否 是目录
nodeIdKey1.setChild(browseNodeA(nodeIdKey1, client, node.getNodeId()));
nodeIdKey1.setChildren(browseNodeA(nodeIdKey1, client, node.getNodeId()));
}
} catch (UaException e) {
logger.error("Browsing nodeId={} failed: {}", browseRoot, e.getMessage(), e);

View File

@ -0,0 +1,89 @@
package com.qgs.dc.s7.controller;
import com.qgs.dc.opcua.arg.*;
import com.qgs.dc.opcua.controller.R;
import com.qgs.dc.s7.service.S7Service;
import org.apache.plc4x.java.PlcDriverManager;
import org.apache.plc4x.java.api.PlcConnection;
import org.apache.plc4x.java.api.exceptions.PlcConnectionException;
import org.apache.plc4x.java.api.messages.PlcReadRequest;
import org.apache.plc4x.java.api.messages.PlcReadResponse;
import org.apache.plc4x.java.api.types.PlcResponseCode;
import org.apache.plc4x.java.api.value.PlcValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.concurrent.CompletableFuture;
@RestController
@RequestMapping("/s7")
public class S7Controller {

    private static final Logger logger = LoggerFactory.getLogger(S7Controller.class);

    @Autowired
    S7Service s7Service;

    /**
     * Registers the hard-coded PLC ("plc1" at s7://192.168.0.200) with the service.
     * NOTE(review): address is hard-coded — presumably a dev/test endpoint; confirm
     * before shipping.
     *
     * @return R.ok() carrying the service's result under key "res"
     * @throws PlcConnectionException if the service cannot register the PLC
     */
    @PostMapping("/addThisPlc")
    public R addThisPlc() throws PlcConnectionException {
        Integer plc1 = s7Service.addPlc("plc1", "s7://192.168.0.200");
        return R.ok().put("res", plc1);
    }

    /**
     * Reads one STRING field (%DB10:10.0:STRING(20)) from the PLC at
     * s7://192.168.0.200 and logs the value(s).
     *
     * @return R.ok() with "res"=1 regardless of read outcome (errors are logged only)
     * @throws PlcConnectionException declared for callers; connection errors are
     *                                currently caught and logged inside
     */
    @PostMapping("/getValue")
    public R getValue() throws PlcConnectionException {
        // try-with-resources: closing a pooled connection returns it to the pool
        try (PlcConnection conn = s7Service.getConnection("s7://192.168.0.200")) {
            if (conn.isConnected()) {
                if (conn.getMetadata().canRead()) {
                    try {
                        PlcReadRequest.Builder builder = conn.readRequestBuilder();
                        builder.addItem("STRING", "%DB10:10.0:STRING(20)");
                        PlcReadRequest readRequest = builder.build();
                        CompletableFuture<? extends PlcReadResponse> execute = readRequest.execute();
                        PlcReadResponse response = execute.get();
                        logResponse(response);
                    } catch (Exception e) {
                        // keep the full stack trace — e.getMessage() alone loses it
                        logger.error("S7 read failed: {}", e.getMessage(), e);
                    }
                } else {
                    // use the logger, not System.out, so this lands in the s7 log files
                    logger.warn("断线后 不能 read");
                }
            } else {
                // fixed message: this branch means the connection is NOT connected
                logger.warn("conn 未 connected");
            }
        } catch (Exception e) {
            logger.error("S7 connection failed: {}", e.getMessage(), e);
        }
        return R.ok().put("res", 1);
    }

    /** Logs every field of a read response, handling single- and multi-valued fields. */
    private void logResponse(PlcReadResponse response) {
        for (String fieldName : response.getFieldNames()) {
            if (response.getResponseCode(fieldName) == PlcResponseCode.OK) {
                int numValues = response.getNumberOfValues(fieldName);
                if (numValues == 1) {
                    logger.info("Value[{}]: {}", fieldName, response.getObject(fieldName));
                } else {
                    logger.info("Value[{}]:", fieldName);
                    for (int i = 0; i < numValues; i++) {
                        logger.info(" - {}", response.getObject(fieldName, i));
                    }
                }
            } else {
                logger.error("Error[{}]: {}", fieldName, response.getResponseCode(fieldName).name());
            }
        }
    }
}

View File

@ -0,0 +1,43 @@
package com.qgs.dc.s7.service;
import org.apache.plc4x.java.PlcDriverManager;
import org.apache.plc4x.java.api.PlcConnection;
import org.apache.plc4x.java.api.exceptions.PlcConnectionException;
import org.apache.plc4x.java.utils.connectionpool.PooledPlcDriverManager;
import org.apache.plc4x.java.utils.connectionpool2.CachedDriverManager;
import org.eclipse.milo.opcua.sdk.client.OpcUaClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import java.util.HashMap;
/**
* @Desc: ""
* @Author: caixiang
* @DATE: 2021/9/23 15:16
*/
@Component
public class S7Service {

    private final Logger logger = LoggerFactory.getLogger(getClass());

    // Cache of named PLC connections; currently never populated (addPlc is a stub).
    // NOTE(review): HashMap is not thread-safe — switch to ConcurrentHashMap if
    // addPlc ever starts writing to it from request threads.
    private final HashMap<String, PlcConnection> plcConnections = new HashMap<>();

    // Pooled manager: getConnection() hands out pooled connections; closing a
    // connection returns it to the pool instead of tearing it down.
    private final PlcDriverManager driverManager;

    public S7Service() {
        driverManager = new PooledPlcDriverManager();
    }

    /**
     * Registers a PLC by logical name and connection URL.
     * TODO: stub — currently ignores both arguments and always returns 1.
     *
     * @param plcName logical name for the PLC (e.g. "plc1")
     * @param url     PLC4X connection string (e.g. "s7://192.168.0.200")
     * @return 1 (placeholder result)
     * @throws PlcConnectionException reserved for the real implementation
     */
    public Integer addPlc(String plcName, String url) throws PlcConnectionException {
        return 1;
    }

    /**
     * Obtains a pooled connection for the given PLC URL.
     * Callers must close it (ideally via try-with-resources) to return it to the pool.
     *
     * @param url PLC4X connection string (e.g. "s7://192.168.0.200")
     * @return a live (pooled) PlcConnection
     * @throws PlcConnectionException if the connection cannot be established
     */
    public PlcConnection getConnection(String url) throws PlcConnectionException {
        return driverManager.getConnection(url);
    }
}

View File

@ -1,5 +1,10 @@
server:
port: 8009
# 只有把 springboot 的debug 模式开启slf4j 才会记录日志
#logging:
# level:
# root: debug
#
spring:
rabbitmq:
# 如果是rabbitmq+haproxy+keepalived集群 那么192.168.0.176是haproxy代理的地址严格来说是keepalived的vip

View File

@ -12,7 +12,7 @@
<property name="logging.pathwork" value="C:/qgs_logger/work" />
<property name="logging.pathopc" value="C:/qgs_logger/opc" />
<property name="logging.pathmq" value="C:/qgs_logger/mq" />
<property name="logging.s7" value="C:/qgs_logger/s7" />
<!--0. 日志格式和颜色渲染 -->
<!-- 彩色日志依赖的渲染类 -->
@ -24,15 +24,19 @@
<!--1. 输出到控制台-->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<!--此日志appender是为开发使用只配置最底级别控制台输出的日志级别是大于或等于此级别的日志信息-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>debug</level>
</filter>
<encoder>
<Pattern>${CONSOLE_LOG_PATTERN}</Pattern>
<!-- 设置字符集 -->
<charset>UTF-8</charset>
</encoder>
<!--此日志appender是为开发使用只配置最底级别控制台输出的日志级别是大于或等于此级别的日志信息-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>info</level>
</filter>
</appender>
<!--2. 输出到文档-->
@ -45,19 +49,6 @@
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- 设置字符集 -->
</encoder>
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
<!-- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">-->
<!-- &lt;!&ndash; 日志归档 &ndash;&gt;-->
<!-- <fileNamePattern>${logging.pathwork}/work-log-debug-%d{yyyy-MM-dd}.%i.log</fileNamePattern>-->
<!--&lt;!&ndash; <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">&ndash;&gt;-->
<!--&lt;!&ndash; <maxFileSize>100MB</maxFileSize>&ndash;&gt;-->
<!--&lt;!&ndash; </timeBasedFileNamingAndTriggeringPolicy>&ndash;&gt;-->
<!-- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">-->
<!-- <maxFileSize>100MB</maxFileSize>-->
<!-- </timeBasedFileNamingAndTriggeringPolicy>-->
<!-- &lt;!&ndash;日志文档保留天数&ndash;&gt;-->
<!-- <maxHistory>999</maxHistory>-->
<!-- </rollingPolicy>-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- rollover daily -->
<fileNamePattern>${logging.pathwork}/work-log-debug-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
@ -67,8 +58,6 @@
<totalSizeCap>200GB</totalSizeCap>
</rollingPolicy>
<!-- 此日志文档只记录debug级别的 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>debug</level>
@ -138,6 +127,32 @@
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender name="S7_DEBUG_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 正在记录的日志文档的路径及文档名 -->
<file>${logging.s7}/debug/s7-log-debug.log</file>
<!--日志文档输出格式-->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- 设置字符集 -->
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- rollover daily -->
<fileNamePattern>${logging.s7}/s7-log-debug-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<!-- each file should be at most 100MB, keep 60 days worth of history, but at most 20GB -->
<maxFileSize>100MB</maxFileSize>
<maxHistory>999</maxHistory>
<totalSizeCap>200GB</totalSizeCap>
</rollingPolicy>
<!-- 此日志文档只记录debug级别的 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>debug</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- 2.2 level为 INFO 日志,时间滚动输出 -->
<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
@ -148,16 +163,6 @@
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
<!-- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">-->
<!-- &lt;!&ndash; 每天日志归档路径以及格式 &ndash;&gt;-->
<!-- <fileNamePattern>${logging.pathwork}/work-log-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>-->
<!-- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">-->
<!-- <maxFileSize>100MB</maxFileSize>-->
<!-- </timeBasedFileNamingAndTriggeringPolicy>-->
<!-- &lt;!&ndash;日志文档保留天数&ndash;&gt;-->
<!-- <maxHistory>999</maxHistory>-->
<!-- </rollingPolicy>-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- rollover daily -->
<fileNamePattern>${logging.pathwork}/work-log-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
@ -242,6 +247,30 @@
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender name="S7_INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 正在记录的日志文档的路径及文档名 -->
<file>${logging.s7}/info/s7-log-info.log</file>
<!--日志文档输出格式-->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- rollover daily -->
<fileNamePattern>${logging.s7}/s7-log-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<!-- each file should be at most 100MB, keep 60 days worth of history, but at most 20GB -->
<maxFileSize>100MB</maxFileSize>
<maxHistory>999</maxHistory>
<totalSizeCap>200GB</totalSizeCap>
</rollingPolicy>
<!-- 此日志文档只记录info级别的 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>info</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- 2.3 level为 WARN 日志,时间滚动输出 -->
<appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
@ -343,6 +372,30 @@
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender name="S7_WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 正在记录的日志文档的路径及文档名 -->
<file>${logging.s7}/warn/s7-log-warn.log</file>
<!--日志文档输出格式-->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- 此处设置字符集 -->
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- rollover daily -->
<fileNamePattern>${logging.s7}/s7-log-warn-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<!-- each file should be at most 100MB, keep 60 days worth of history, but at most 20GB -->
<maxFileSize>100MB</maxFileSize>
<maxHistory>999</maxHistory>
<totalSizeCap>200GB</totalSizeCap>
</rollingPolicy>
<!-- 此日志文档只记录warn级别的 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>warn</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- 2.4 level为 ERROR 日志,时间滚动输出 -->
<appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
@ -388,16 +441,6 @@
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- 此处设置字符集 -->
</encoder>
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
<!-- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">-->
<!-- <fileNamePattern>${logging.pathopc}/opcua-log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>-->
<!-- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">-->
<!-- <maxFileSize>100MB</maxFileSize>-->
<!-- </timeBasedFileNamingAndTriggeringPolicy>-->
<!-- &lt;!&ndash;日志文档保留天数&ndash;&gt;-->
<!-- <maxHistory>999</maxHistory>-->
<!-- </rollingPolicy>-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- rollover daily -->
<fileNamePattern>${logging.pathopc}/opcua-log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
@ -422,15 +465,6 @@
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- 此处设置字符集 -->
</encoder>
<!-- 日志记录器的滚动策略,按日期,按大小记录 -->
<!-- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">-->
<!-- <fileNamePattern>${logging.pathmq}/mq-log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>-->
<!-- <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">-->
<!-- <maxFileSize>100MB</maxFileSize>-->
<!-- </timeBasedFileNamingAndTriggeringPolicy>-->
<!-- &lt;!&ndash;日志文档保留天数&ndash;&gt;-->
<!-- <maxHistory>999</maxHistory>-->
<!-- </rollingPolicy>-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- rollover daily -->
@ -448,6 +482,32 @@
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender name="S7_ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 正在记录的日志文档的路径及文档名 -->
<file>${logging.s7}/error/s7-log-error.log</file>
<!--日志文档输出格式-->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- 此处设置字符集 -->
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- rollover daily -->
<fileNamePattern>${logging.s7}/s7-log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<!-- each file should be at most 100MB, keep 60 days worth of history, but at most 20GB -->
<maxFileSize>100MB</maxFileSize>
<maxHistory>999</maxHistory>
<totalSizeCap>200GB</totalSizeCap>
</rollingPolicy>
<!-- 此日志文档只记录ERROR级别的 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!--
<logger>用来设置某一个包或者具体的某一个类的日志打印级别、
以及指定<appender><logger>仅有一个name属性
@ -475,7 +535,7 @@
可以包含零个或多个元素标识这个appender将会添加到这个logger。
-->
<!-- name就是包名这个包下的 所有logger 输出就以下配置 -->
<!-- name就是包名这个包下的 所有logger 输出就以下配置这里的logger只会输出代码里 你指定打印的log -->
<logger name="com.qgs.dc.opcua" additivity="false">
<!--使用哪一个Appender-->
<appender-ref ref="OPCUA_DEBUG_FILE" />
@ -491,6 +551,14 @@
<appender-ref ref="MQ_ERROR_FILE" />
</logger>
<logger name="com.qgs.dc.s7" additivity="false">
<appender-ref ref="S7_DEBUG_FILE" />
<appender-ref ref="S7_INFO_FILE" />
<appender-ref ref="S7_WARN_FILE" />
<appender-ref ref="S7_ERROR_FILE" />
</logger>
<!-- work这个目录 是所有控制台信息 + 第三方jar包 输出的信息 -->
<root level="info">
<appender-ref ref="CONSOLE" />
<appender-ref ref="DEBUG_FILE" />