第一步:构建ik-solr
# Remove any existing solr CONTAINER (note: this removes the container, not the image)
docker rm -f solr
# Pull the latest solr image
docker pull solr:latest
# Create a named volume for solr data
docker volume create solr_data
# Run solr
docker run --name solr -p 8983:8983 -v solr_data:/var/solr -d -t solr:latest
# Copy the analyzer / DIH / JDBC jars into WEB-INF/lib
docker cp /mnt/cache/solr/load/ik-analyzer-8.3.0.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/ik-analyzer-solr7-7.x.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/solr-dataimporthandler-8.4.0.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/solr-dataimporthandler-extras-8.4.0.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/mysql-connector-java-8.0.22.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
# Create the classes directory.
# (Non-interactive; the original opened an interactive shell with `docker exec -it`,
# which does not work when these lines are run as a script.)
docker exec --user=root solr mkdir -p /opt/solr/server/solr-webapp/webapp/WEB-INF/classes
# Copy the IK dictionaries and config
docker cp /mnt/cache/solr/load/IKAnalyzer.cfg.xml solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/classes/
docker cp /mnt/cache/solr/load/dynamicdic.txt solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/ext.dic solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/classes/
docker cp /mnt/cache/solr/load/ik.conf solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/classes/
docker cp /mnt/cache/solr/load/stopword.dic solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/classes/
# Commit the container as a new image
docker commit solr solr-ik-mysql:1.0
# Single-node deployment ends here; for a cluster, continue with the steps below
docker rm -f solr
docker run --name solr -p 8983:8983 -v solr_data:/var/solr -e SOLR_HEAP=6G -d -t solr-ik-mysql:1.0
第二步
# Re-run solr from the committed image (same commands as the end of step 1;
# repeated here so step 2 is self-contained)
docker rm -f solr
docker run --name solr -p 8983:8983 -v solr_data:/var/solr -e SOLR_HEAP=6G -d -t solr-ik-mysql:1.0
到此处docker 单机版的solr 搭建完,下面是swarm版
第三步 配置好 stacks
#docker-compose.yaml
# docker-compose.yaml for `docker stack deploy` (Swarm).
# NOTE: container_name, depends_on and restart are IGNORED in swarm mode;
# they are kept here so the file also works with plain docker-compose.
# Use deploy.restart_policy to control restarts under swarm.
version: "3"
services:
  zoo1:
    image: zookeeper:3.6.1
    restart: always  # ignored by swarm; see note above
    hostname: zoo1
    ports:
      # Port mappings are quoted: unquoted digits+colon parse as a base-60
      # integer under YAML 1.1.
      - "2181:2181"
    volumes:
      - /home/zk/zookeeper1/data:/data
      - /home/zk/zookeeper1/datalog:/datalog
      - /home/zk/zookeeper1/logs:/logs
    environment:
      ZOO_MY_ID: "1"
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
    networks:
      zookeeper_default:
    deploy:
      placement:
        constraints:
          # NOTE(review): all three zookeepers are pinned to worker1 — a single
          # node failure takes down the whole ensemble; confirm this is intended.
          - node.hostname == worker1
  zoo2:
    image: zookeeper:3.6.1
    restart: always
    hostname: zoo2
    ports:
      - "2182:2181"
    volumes:
      - /home/zk/zookeeper2/data:/data
      - /home/zk/zookeeper2/datalog:/datalog
      - /home/zk/zookeeper2/logs:/logs
    environment:
      ZOO_MY_ID: "2"
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
    networks:
      zookeeper_default:
    deploy:
      placement:
        constraints:
          - node.hostname == worker1
  zoo3:
    image: zookeeper:3.6.1
    restart: always
    hostname: zoo3
    ports:
      - "2183:2181"
    volumes:
      - /home/zk/zookeeper3/data:/data
      - /home/zk/zookeeper3/datalog:/datalog
      - /home/zk/zookeeper3/logs:/logs
    environment:
      ZOO_MY_ID: "3"
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
    networks:
      zookeeper_default:
    deploy:
      placement:
        constraints:
          - node.hostname == worker1
  solr1:
    # Must match the image committed in step 1 (was "solr-ik:1.0", which
    # is never built anywhere in this document).
    image: solr-ik-mysql:1.0
    ports:
      - "28983:8983"
    container_name: solr1  # ignored by swarm
    restart: always
    volumes:
      - solr_cluster1:/var/solr
    environment:
      ZK_HOST: "zoo3:2181,zoo2:2181,zoo1:2181"
      SOLR_HEAP: "6G"
    networks:
      # Required: without this the solr services land on the stack's default
      # network and cannot resolve zoo1/zoo2/zoo3.
      zookeeper_default:
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    deploy:
      placement:
        constraints:
          - node.hostname == manager
  solr2:
    image: solr-ik-mysql:1.0
    ports:
      - "28984:8983"
    container_name: solr2  # ignored by swarm
    restart: always
    volumes:
      - solr_cluster2:/var/solr
    environment:
      ZK_HOST: "zoo3:2181,zoo2:2181,zoo1:2181"
      SOLR_HEAP: "6G"
    networks:
      zookeeper_default:
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    deploy:
      placement:
        constraints:
          - node.hostname == manager
  solr3:
    image: solr-ik-mysql:1.0
    ports:
      - "28985:8983"
    container_name: solr3  # ignored by swarm
    restart: always
    volumes:
      - solr_cluster3:/var/solr
    environment:
      ZK_HOST: "zoo3:2181,zoo2:2181,zoo1:2181"
      SOLR_HEAP: "6G"
    networks:
      zookeeper_default:
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    deploy:
      placement:
        constraints:
          - node.hostname == manager
volumes:
  solr_cluster1:
  solr_cluster2:
  solr_cluster3:
# Top-level definition of the network every service joins.
# This section was missing in the original: referencing an undefined network
# makes `docker stack deploy` fail.
networks:
  zookeeper_default:
第四步 创建一个默认的collection
# Create a default collection (2 shards, replication factor 3) to use as a base
docker exec -it 841d3120b488a749ad421df61328ce35bd16714875e4e858156bad81db3e2217 /opt/solr/bin/solr create_collection -c zc_company -shards 2 -replicationFactor 3 -p 8983
# Delete a collection
docker exec -it 841d3120b488a749ad421df61328ce35bd16714875e4e858156bad81db3e2217 /opt/solr/bin/solr delete -c collection1 -p 8983
其中 841d3120b488a749ad421df61328ce35bd16714875e4e858156bad81db3e2217 为 solr 容器的 ID,可通过 docker ps 查看
第五 集群配置文件
将resources目录下的IKAnalyzer.cfg.xml、ext.dic、stopword.dic放入solr服务的Jetty或Tomcat的webapp/WEB-INF/classes/目录下;
① IKAnalyzer.cfg.xml (IK默认的配置文件,用于配置自带的扩展词典及停用词典)
② ext.dic (默认的扩展词典)
③ stopword.dic (默认的停词词典)
注意:与单机版不同,ik.conf及dynamicdic.txt请不要放在classes目录下!
将resources目录下的ik.conf及dynamicdic.txt放入solr配置文件夹中,与solr的managed-schema文件同目录中;
配置Solr的managed-schema,添加ik分词器,示例如下;
<!-- IK analyzer field type for managed-schema.
     Index time uses fine-grained segmentation (useSmart="false") for better
     recall; query time uses smart segmentation (useSmart="true") for better
     precision. conf="ik.conf" points at the dynamic-dictionary config that
     must sit next to managed-schema (NOT under classes/ in cluster mode). -->
<fieldType name="text_ik" class="solr.TextField">
<analyzer type="index">
<tokenizer class="org.wltea.analyzer.lucene.IKTokenizerFactory" useSmart="false" conf="ik.conf"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="org.wltea.analyzer.lucene.IKTokenizerFactory" useSmart="true" conf="ik.conf"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
第六 提交配置到zk
比如我们修改schema.xml配置文件之后,根本不用登录zookeeper删除原有文件,文件会自动覆盖,这里直接上传即可,命令如下:
# Upload the WHOLE config directory to ZooKeeper in one command.
# FIX: zkcli's "upconfig" requires -confdir to be a DIRECTORY; the original
# commands passed individual files (and file paths as -confname), which
# upconfig does not accept.
docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd upconfig -confdir /var/solr/data/conf -confname zc_company
# To update a SINGLE file afterwards, use "putfile" (ZK path first, local file second):
docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd putfile /configs/zc_company/managed-schema /var/solr/data/conf/managed-schema
docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd putfile /configs/zc_company/data-config.xml /var/solr/data/conf/data-config.xml
docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd putfile /configs/zc_company/dynamicdic.txt /var/solr/data/conf/dynamicdic.txt
docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd putfile /configs/zc_company/solrconfig.xml /var/solr/data/conf/solrconfig.xml
docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd putfile /configs/zc_company/stopword.dic /var/solr/data/conf/stopword.dic
docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd putfile /configs/zc_company/ik.conf /var/solr/data/conf/ik.conf
# After uploading, reload the collection so the new config takes effect:
# http://<host>:28983/solr/admin/collections?action=RELOAD&name=zc_company
备份恢复
参考: https://solr.apache.org/guide/8_9/making-and-restoring-backups.html (与本文使用的 Solr 8.9 版本对应)
# 定期备份 每个 节点都要执行
http://192.168.0.105:28983/solr/zc_company/replication?command=backup&location=/var/solr/data/&name=aaa
# 备份状态
http://192.168.0.105:28983/solr/zc_company/replication?command=details
# 备份恢复
http://192.168.0.105:28983/solr/zc_company/replication?command=restore&name=backup_name
导出
http://192.168.0.105:28983/solr/zc_company/select?q.op=OR&q=*%3A*&rows=100000000&start=0&wt=csv
点波关注 系统搭建(docker)