1. Add the Maven dependency
<!-- Atomikos: switch between data sources and manage distributed transactions -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-jta-atomikos</artifactId>
</dependency>
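The configuration classes below also use Druid's DruidXADataSource, so the Druid artifact must be on the classpath as well. A minimal sketch, assuming it is not already pulled in transitively; the version shown is only an example:
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid</artifactId>
    <!-- example version; use whatever your project standardizes on -->
    <version>1.1.21</version>
</dependency>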
2. Configure the first data source
package com.ars.datacenter.service.core.datasource;
import com.alibaba.druid.pool.xa.DruidXADataSource;
import com.atomikos.icatch.jta.UserTransactionImp;
import com.atomikos.icatch.jta.UserTransactionManager;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.jta.atomikos.AtomikosDataSourceBean;
import org.springframework.context.EnvironmentAware;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.env.Environment;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.transaction.jta.JtaTransactionManager;
import tk.mybatis.spring.annotation.MapperScan;
import javax.sql.DataSource;
import javax.transaction.UserTransaction;
import java.sql.SQLException;
/**
 * Class: AmsDataSourceConfig
 * Implements EnvironmentAware so it can read properties straight from application.yml.
 * Creates the first (primary) data source.
 *
 * @author fancl
 * @since 2020/12/14
 */
@Configuration
@MapperScan(basePackages = "com.ars.datacenter.service.core.repository.ams", sqlSessionFactoryRef = "amsSqlSessionFactory")
public class AmsDataSourceConfig implements EnvironmentAware {

    private Environment env;

    @Override
    public void setEnvironment(Environment environment) {
        this.env = environment;
    }

    @Primary
    @Bean(name = "amsDataSource")
    public DataSource getDataSource() throws SQLException {
        String prefix = "spring.datasource.druid.test1.";
        // build the XA-capable Druid data source from the properties under the prefix
        DruidXADataSource druidXADataSource = new DruidXADataSource();
        druidXADataSource.setUrl(env.getProperty(prefix + "url"));
        druidXADataSource.setUsername(env.getProperty(prefix + "username"));
        druidXADataSource.setPassword(env.getProperty(prefix + "password"));
        druidXADataSource.setDriverClassName(env.getProperty(prefix + "driverClassName"));
        druidXADataSource.setInitialSize(env.getProperty(prefix + "initialSize", Integer.class));
        druidXADataSource.setMaxActive(env.getProperty(prefix + "maxActive", Integer.class));
        druidXADataSource.setMinIdle(env.getProperty(prefix + "minIdle", Integer.class));
        druidXADataSource.setMaxWait(env.getProperty(prefix + "maxWait", Integer.class));
        druidXADataSource.setPoolPreparedStatements(env.getProperty(prefix + "poolPreparedStatements", Boolean.class));
        druidXADataSource.setMaxPoolPreparedStatementPerConnectionSize(env.getProperty(prefix + "maxPoolPreparedStatementPerConnectionSize", Integer.class));
        druidXADataSource.setValidationQuery(env.getProperty(prefix + "validationQuery"));
        druidXADataSource.setValidationQueryTimeout(env.getProperty(prefix + "validationQueryTimeout", Integer.class));
        druidXADataSource.setTestOnBorrow(env.getProperty(prefix + "testOnBorrow", Boolean.class));
        druidXADataSource.setTestOnReturn(env.getProperty(prefix + "testOnReturn", Boolean.class));
        druidXADataSource.setTestWhileIdle(env.getProperty(prefix + "testWhileIdle", Boolean.class));
        druidXADataSource.setTimeBetweenEvictionRunsMillis(env.getProperty(prefix + "timeBetweenEvictionRunsMillis", Integer.class));
        druidXADataSource.setMinEvictableIdleTimeMillis(env.getProperty(prefix + "minEvictableIdleTimeMillis", Integer.class));
        druidXADataSource.setFilters(env.getProperty(prefix + "filters"));
        // wrap the XA data source in an Atomikos-managed pool and register it as an XA resource
        AtomikosDataSourceBean atomikosDataSourceBean = new AtomikosDataSourceBean();
        atomikosDataSourceBean.setXaDataSource(druidXADataSource);
        atomikosDataSourceBean.setUniqueResourceName("amsDataSource");
        atomikosDataSourceBean.setMinPoolSize(3);
        atomikosDataSourceBean.setMaxPoolSize(25);
        atomikosDataSourceBean.setMaintenanceInterval(28000); // pool maintenance interval, in seconds
        atomikosDataSourceBean.setTestQuery(env.getProperty(prefix + "validationQuery"));
        return atomikosDataSourceBean;
    }
    /*
     * Note: this JTA transaction manager is the single, global transaction manager.
     * The other data sources must NOT define their own transaction managers.
     */
    @Bean(name = "transactionManager")
    @Primary
    public JtaTransactionManager regTransactionManager() {
        UserTransactionManager userTransactionManager = new UserTransactionManager();
        UserTransaction userTransaction = new UserTransactionImp();
        return new JtaTransactionManager(userTransaction, userTransactionManager);
    }
@Bean(name = "amsSqlSessionFactory")
@Primary
public SqlSessionFactory amsSqlSessionFactory(@Qualifier("amsDataSource") DataSource amsDataSource)
throws Exception {
SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
bean.setDataSource(amsDataSource);
bean.setMapperLocations(
new PathMatchingResourcePatternResolver().getResources("classpath:mapper/*.xml"));
return bean.getObject();
}
}
Note: the JtaTransactionManager above acts as the single, global transaction manager; the remaining data sources must not configure transaction managers of their own.
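With the global JTA transaction manager in place, a single @Transactional method can write to both databases and Atomikos will commit or roll them back together. Below is a minimal sketch of such a service; the two mapper interfaces (AmsUserMapper, CctbiReportMapper) and their methods are hypothetical stand-ins for whatever mappers live in the scanned repository packages:
package com.ars.datacenter.service.core;

import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import com.ars.datacenter.service.core.repository.ams.AmsUserMapper;
import com.ars.datacenter.service.core.repository.cctbireport.CctbiReportMapper;

/**
 * Sketch of a service spanning both data sources. The two mappers are
 * hypothetical; substitute the mappers your packages actually define.
 */
@Service
public class SyncService {

    private final AmsUserMapper amsUserMapper;         // routed to amsDataSource (test1)
    private final CctbiReportMapper cctbiReportMapper; // routed to cctbiDataSource (test2)

    public SyncService(AmsUserMapper amsUserMapper, CctbiReportMapper cctbiReportMapper) {
        this.amsUserMapper = amsUserMapper;
        this.cctbiReportMapper = cctbiReportMapper;
    }

    /**
     * Both inserts join the same JTA transaction. If either statement (or the
     * code after it) throws, Atomikos rolls back the work in BOTH databases.
     */
    @Transactional
    public void copyUserToReport(long userId) {
        amsUserMapper.markExported(userId);       // write to test1
        cctbiReportMapper.insertFromUser(userId); // write to test2
    }
}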
3. Configure the second data source
package com.ars.datacenter.service.core.datasource;
import com.alibaba.druid.pool.xa.DruidXADataSource;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.jta.atomikos.AtomikosDataSourceBean;
import org.springframework.context.EnvironmentAware;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import tk.mybatis.spring.annotation.MapperScan;
import javax.sql.DataSource;
import java.sql.SQLException;
/**
 * Implements EnvironmentAware so it can read properties straight from application.yml.
 * Creates the second data source.
 *
 * @author fancl
 * @since 2020/12/14 19:42
 **/
@Configuration
@MapperScan(basePackages = "com.ars.datacenter.service.core.repository.cctbireport", sqlSessionFactoryRef = "cctbiSqlSessionFactory")
public class CctbiDataSourceConfig implements EnvironmentAware {

    private Environment env;

    @Override
    public void setEnvironment(Environment environment) {
        this.env = environment;
    }

    @Bean(name = "cctbiDataSource")
    public DataSource getDataSource() throws SQLException {
        String prefix = "spring.datasource.druid.test2.";
        DruidXADataSource druidXADataSource = new DruidXADataSource();
        druidXADataSource.setUrl(env.getProperty(prefix + "url"));
        druidXADataSource.setUsername(env.getProperty(prefix + "username"));
        druidXADataSource.setPassword(env.getProperty(prefix + "password"));
        druidXADataSource.setDriverClassName(env.getProperty(prefix + "driverClassName"));
        druidXADataSource.setInitialSize(env.getProperty(prefix + "initialSize", Integer.class));
        druidXADataSource.setMaxActive(env.getProperty(prefix + "maxActive", Integer.class));
        druidXADataSource.setMinIdle(env.getProperty(prefix + "minIdle", Integer.class));
        druidXADataSource.setMaxWait(env.getProperty(prefix + "maxWait", Integer.class));
        druidXADataSource.setPoolPreparedStatements(env.getProperty(prefix + "poolPreparedStatements", Boolean.class));
        druidXADataSource.setMaxPoolPreparedStatementPerConnectionSize(env.getProperty(prefix + "maxPoolPreparedStatementPerConnectionSize", Integer.class));
        druidXADataSource.setValidationQuery(env.getProperty(prefix + "validationQuery"));
        druidXADataSource.setValidationQueryTimeout(env.getProperty(prefix + "validationQueryTimeout", Integer.class));
        druidXADataSource.setTestOnBorrow(env.getProperty(prefix + "testOnBorrow", Boolean.class));
        druidXADataSource.setTestOnReturn(env.getProperty(prefix + "testOnReturn", Boolean.class));
        druidXADataSource.setTestWhileIdle(env.getProperty(prefix + "testWhileIdle", Boolean.class));
        druidXADataSource.setTimeBetweenEvictionRunsMillis(env.getProperty(prefix + "timeBetweenEvictionRunsMillis", Integer.class));
        druidXADataSource.setMinEvictableIdleTimeMillis(env.getProperty(prefix + "minEvictableIdleTimeMillis", Integer.class));
        druidXADataSource.setFilters(env.getProperty(prefix + "filters"));
        // wrap the XA data source in an Atomikos-managed pool with its own unique resource name
        AtomikosDataSourceBean atomikosDataSourceBean = new AtomikosDataSourceBean();
        atomikosDataSourceBean.setXaDataSource(druidXADataSource);
        atomikosDataSourceBean.setUniqueResourceName("cctbiDataSource");
        atomikosDataSourceBean.setMinPoolSize(3);
        atomikosDataSourceBean.setMaxPoolSize(25);
        atomikosDataSourceBean.setMaintenanceInterval(28000); // pool maintenance interval, in seconds
        atomikosDataSourceBean.setTestQuery(env.getProperty(prefix + "validationQuery"));
        return atomikosDataSourceBean;
    }
@Bean(name = "cctbiSqlSessionFactory")
public SqlSessionFactory amsSqlSessionFactory(@Qualifier("cctbiDataSource") DataSource cctbiDataSource)
throws Exception {
SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
bean.setDataSource(cctbiDataSource);
bean.setMapperLocations(
new PathMatchingResourcePatternResolver().getResources("classpath:mapper/*.xml"));
return bean.getObject();
}
}
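Note that both SqlSessionFactory beans above load the same pattern classpath:mapper/*.xml, so every mapper XML file is registered in both factories. In practice you may want to give each data source its own directory; a sketch for the cctbi factory, where the mapper/cctbi path is just an example:
        bean.setMapperLocations(
                new PathMatchingResourcePatternResolver().getResources("classpath:mapper/cctbi/*.xml"));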
4. The application.yml configuration file
spring:
  profiles:
    active: "dev"
  application:
    name: demo
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      test1:
        driverClassName: com.mysql.jdbc.Driver
        name: ams
        url: jdbc:mysql://localhost:3306/test1
        username: mysql
        password: 123456
        initialSize: 10 # number of connections created when the pool starts
        minIdle: 10 # when evicting idle connections, keep at least this many
        maxActive: 20 # maximum number of active sessions in the pool
        maxWait: 600000 # max time (ms) to wait for a connection before the request fails (pool exhausted); -1 means wait forever
        #keepAlive: true # if a connection sits idle longer than minEvictableIdleTimeMillis, run validationQuery to keep it from being killed; applies to at most minIdle connections
        timeBetweenEvictionRunsMillis: 60000 # how often (ms) to check for idle connections; a non-positive value disables the check
        numTestsPerEvictionRun: 300
        minEvictableIdleTimeMillis: 300000 # a connection idle for this many ms is evicted on the next check; keep it below any firewall timeout
        testWhileIdle: true # validate connections while idle, before they are handed out (cheap)
        validationQuery: select 'x' FROM DUAL # SQL that Druid runs against the database to check whether a pooled connection is still usable
        validationQueryTimeout: 10000
        testOnBorrow: true # validate the connection when it is borrowed (expensive, hurts performance)
        testOnReturn: false # validate the connection when it is returned (expensive, hurts performance)
        # poolPreparedStatements: false # disable the prepared-statement cache
        poolPreparedStatements: true
        maxPoolPreparedStatementPerConnectionSize: 20
        filters: stat # Druid filter plugins; common ones: stat (monitoring), log4j or slf4j (logging), wall (SQL-injection defense)
        # enable mergeSql and slow-SQL logging via connectProperties
        connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
        # merge monitoring data from multiple DruidDataSource instances
        useGlobalDataSourceStat: true
      test2:
        driverClassName: com.mysql.jdbc.Driver
        name: cct_bi_report
        url: jdbc:mysql://localhost:3306/test2
        username: mysql
        password: 123456
        initialSize: 10 # number of connections created when the pool starts
        minIdle: 10 # when evicting idle connections, keep at least this many
        maxActive: 20 # maximum number of active sessions in the pool
        maxWait: 600000 # max time (ms) to wait for a connection before the request fails (pool exhausted); -1 means wait forever
        #keepAlive: true # if a connection sits idle longer than minEvictableIdleTimeMillis, run validationQuery to keep it from being killed; applies to at most minIdle connections
        timeBetweenEvictionRunsMillis: 60000 # how often (ms) to check for idle connections; a non-positive value disables the check
        numTestsPerEvictionRun: 300
        minEvictableIdleTimeMillis: 300000 # a connection idle for this many ms is evicted on the next check; keep it below any firewall timeout
        testWhileIdle: true # validate connections while idle, before they are handed out (cheap)
        validationQuery: select 'x' FROM DUAL # SQL that Druid runs against the database to check whether a pooled connection is still usable
        validationQueryTimeout: 10000
        testOnBorrow: true # validate the connection when it is borrowed (expensive, hurts performance)
        testOnReturn: false # validate the connection when it is returned (expensive, hurts performance)
        # poolPreparedStatements: false # disable the prepared-statement cache
        poolPreparedStatements: true
        maxPoolPreparedStatementPerConnectionSize: 20
        filters: stat # Druid filter plugins; common ones: stat (monitoring), log4j or slf4j (logging), wall (SQL-injection defense)
        # enable mergeSql and slow-SQL logging via connectProperties
        connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
        # merge monitoring data from multiple DruidDataSource instances
        useGlobalDataSourceStat: true
logging:
  level:
    root: DEBUG
  config: classpath:logback/logback_dev.xml
server:
  port: 8080
mybatis:
  mapper-locations: classpath:mapper/*.xml
  type-aliases-package: com.fcl.demo
  configuration:
    call-setters-on-nulls: true
5. jta-atomikos prints fairly noisy INFO-level logs by default; you can raise its log level in the logback configuration as shown below (see the last few lines).
<?xml version="1.0" encoding="utf-8"?>
<configuration scan="true" scanPeriod="10 seconds">
    <!-- file output pattern -->
    <property name="pattern" value="%d{yyyy-MM-dd HH:mm:ss.SSS} -%5p ${PID:-} [%15.15t] %-40.40logger{39} : %m%n"/>
    <property name="charsetEncoding" value="UTF-8"/>
    <!-- log output directory -->
    <property name="log_dir" value="./logs/demo"/>
    <!-- keep at most 30 days of history -->
    <property name="maxHistory" value="30"/>
    <!-- console appender -->
    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
<appender name="dailyRollingFileAppender" class="ch.qos.logback.core.rolling.RollingFileAppender">
<File>${log_dir}/demo.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- daily rollover -->
<FileNamePattern>${log_dir}/demo.%d{yyyy-MM-dd}.log</FileNamePattern>
<!-- keep 30 days' worth of history -->
<maxHistory>${maxHistory}</maxHistory>
</rollingPolicy>
<encoder>
<Pattern>${pattern}</Pattern>
<charset>${charsetEncoding}</charset>
</encoder>
</appender>
<appender name="ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 过滤器,只记录ERROR级别的日志 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<!-- 最常用的滚动策略,它根据时间来制定滚动策略.既负责滚动也负责出发滚动 -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!--日志输出位置 可相对、和绝对路径 -->
<fileNamePattern>${log_dir}/%d{yyyy-MM-dd}/error-log.log</fileNamePattern>
<!-- 可选节点,控制保留的归档文件的最大数量,超出数量就删除旧文件假设设置每个月滚动,且<maxHistory>是6,
则只保存最近6个月的文件,删除之前的旧文件。注意,删除旧文件是,那些为了归档而创建的目录也会被删除-->
<maxHistory>${maxHistory}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger - %msg%n</pattern>
<charset>${charsetEncoding}</charset>
</encoder>
</appender>
<appender name="WARN" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 过滤器,只记录WARN级别的日志 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>WARN</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<!-- 最常用的滚动策略,它根据时间来制定滚动策略.既负责滚动也负责出发滚动 -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!--日志输出位置 可相对、和绝对路径 -->
<fileNamePattern>${log_dir}/%d{yyyy-MM-dd}/warn-log.log</fileNamePattern>
<!-- 可选节点,控制保留的归档文件的最大数量,超出数量就删除旧文件假设设置每个月滚动,且<maxHistory>是6,
则只保存最近6个月的文件,删除之前的旧文件。注意,删除旧文件是,那些为了归档而创建的目录也会被删除-->
<maxHistory>${maxHistory}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger - %n%msg%n</pattern>
<charset>${charsetEncoding}</charset>
</encoder>
</appender>
<appender name="INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 过滤器,只记录WARN级别的日志 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>INFO</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<!-- 最常用的滚动策略,它根据时间来制定滚动策略.既负责滚动也负责出发滚动 -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!--日志输出位置 可相对、和绝对路径 -->
<fileNamePattern>${log_dir}/%d{yyyy-MM-dd}/info-log.log</fileNamePattern>
<!-- 可选节点,控制保留的归档文件的最大数量,超出数量就删除旧文件假设设置每个月滚动,且<maxHistory>是6,
则只保存最近6个月的文件,删除之前的旧文件。注意,删除旧文件是,那些为了归档而创建的目录也会被删除-->
<maxHistory>${maxHistory}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger - %n%msg%n</pattern>
<charset>${charsetEncoding}</charset>
</encoder>
</appender>
<appender name="DEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- 过滤器,只记录WARN级别的日志 -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>DEBUG</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<!-- 最常用的滚动策略,它根据时间来制定滚动策略.既负责滚动也负责出发滚动 -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!--日志输出位置 可相对、和绝对路径 -->
<fileNamePattern>${log_dir}/%d{yyyy-MM-dd}/debug-log.log</fileNamePattern>
<!-- 可选节点,控制保留的归档文件的最大数量,超出数量就删除旧文件假设设置每个月滚动,且<maxHistory>是6,
则只保存最近6个月的文件,删除之前的旧文件。注意,删除旧文件是,那些为了归档而创建的目录也会被删除-->
<maxHistory>${maxHistory}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger - %n%msg%n</pattern>
<charset>${charsetEncoding}</charset>
</encoder>
</appender>
<logger name="org.springframework.web" level="INFO"/>
<!--myibatis log configure-->
<logger name="com.apache.ibatis" level="DEBUG"/>
<logger name="java.sql.Connection" level="DEBUG"/>
<logger name="java.sql.Statement" level="DEBUG"/>
<logger name="java.sql.PreparedStatement" level="DEBUG"/>
<logger name="org.springframework.jdbc" level="DEBUG"/>
<logger name="org.springframework.transaction" level="DEBUG"/>
<logger name="com.ars.datacenter.service.core.repository" level="DEBUG"/>
<root level="INFO">
<appender-ref ref="console"/>
<appender-ref ref="dailyRollingFileAppender"/>
<appender-ref ref="ERROR"/>
<appender-ref ref="WARN"/>
<appender-ref ref="INFO"/>
<appender-ref ref="DEBUG"/>
</root>
<!-- 请看这里:配置 atomikos 只打印error级别的日志-->
<logger name="com.atomikos">
<level value="error" />
</logger>
</configuration>
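If you prefer to keep this setting in Spring Boot configuration rather than in logback XML, the equivalent of the com.atomikos logger above can go into application.yml:
logging:
  level:
    com.atomikos: ERROR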
6. Finally, you may run into one more problem, this time with proxying, for example: "The bean 'xxx' could not be injected as a 'com.github.service.xx' because it is a JDK dynamic proxy"

Description:
The bean 'userServiceImpl' could not be injected as a 'com.ysq.springboot.service.Impl.UserServiceImpl' because it is a JDK dynamic proxy that implements:
com.ysq.springboot.service.UserService

Action:
Consider injecting the bean as one of its interfaces or forcing the use of CGLib-based proxies by setting proxyTargetClass=true on @EnableAsync and/or @EnableCaching.

The fix: annotate the main class with @EnableTransactionManagement(proxyTargetClass = true), which enables transaction management with CGLIB (class-based) proxies:
package com.ars.datacenter.service;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;
import org.springframework.transaction.annotation.EnableTransactionManagement;

/**
 * Application entry point.
 *
 * @author fancl
 * @since 2020/9/8 11:21
 **/
@SpringBootApplication
@EnableTransactionManagement(proxyTargetClass = true) // enable transactions with CGLIB proxies
public class DataServiceApplication extends SpringBootServletInitializer {

    public static void main(String[] args) {
        SpringApplication.run(DataServiceApplication.class, args);
    }
}
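To convince yourself the distributed rollback actually works, the quickest check is a method that writes to both databases and then throws; after calling it, neither database should contain the new rows. A sketch reusing the hypothetical mappers from the SyncService shown in step 2:
    // inside the SyncService sketch shown earlier
    @Transactional
    public void copyUserToReportAndFail(long userId) {
        amsUserMapper.markExported(userId);       // write to test1
        cctbiReportMapper.insertFromUser(userId); // write to test2
        // failure after both writes: Atomikos must roll back BOTH databases
        throw new RuntimeException("forced rollback for testing");
    }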