Generating certificates for k8s with Salt and Vault
-
Salt Summary
- A configuration management system: it maintains all kinds of configuration on every remote node in a cluster in a user-defined state (for example, making sure a specific package is installed and a specific service is running, or that a given piece of configuration stays uniform across the cluster)
- A distributed remote execution system: from the salt master you can query a piece of data from, or run a command on, a single remote node or any batch of remote nodes selected by a tag (for example, querying the version of some plugin on every remote node whose roles tag is kubernetes-node); see the sketch after this list
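As a minimal sketch of the second point, the same remote-execution API that the runner below is built on can be driven from Python on the master. The grain value and the command here are assumptions for illustration:

# Query every minion whose "roles" grain is kubernetes-node, using the
# same LocalClient API the runner script below uses.
import salt.client

client = salt.client.LocalClient()
# tgt_type='grain' selects minions by grain value instead of by minion ID
ret = client.cmd('roles:kubernetes-node', 'cmd.run',
                 ['kubelet --version'], tgt_type='grain')
for minion_id, output in sorted(ret.items()):
    print('{}: {}'.format(minion_id, output))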
-
Configure the salt runner
- On the salt master, create the config file k8s_pki.conf under /etc/salt/master.d/ and fill in the parameters the salt runner script needs when calling the Vault API
root@Caden-dev:/etc/salt/master.d# cat k8s_pki.conf
k8s_pki:
  lpt:
    url: http://127.0.0.1:8200
    mount_path: pki/k8s-lpt    # each environment uses its own secrets path
    token: xxxxxx-xxxxxx-xxxx-xxxx-xxxxxx    # root token produced by vault init
  fat:
    url: http://127.0.0.1:8200
    mount_path: pki/k8s-fat
    token: xxxxxx-xxxxxx-xxxx-xxxx-xxxxxx
  uat:
    url: http://127.0.0.1:8200
    mount_path: pki/k8s-uat
    token: xxxxxx-xxxxxx-xxxx-xxxx-xxxxxx
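For reference, this is how the runner below turns one of these contexts into a concrete Vault API URL (a sketch with the token elided; the role name kubelet is just the example used later):

# What _request() in the runner builds for the 'lpt' context
cfg = {'url': 'http://127.0.0.1:8200', 'mount_path': 'pki/k8s-lpt'}
url = '{}/v1/{}/{}'.format(cfg['url'].rstrip('/'),
                           cfg['mount_path'].rstrip('/'),
                           'issue/kubelet')
# -> http://127.0.0.1:8200/v1/pki/k8s-lpt/issue/kubelet
# The root token is sent in the X-Vault-Token request header.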
- Create a _runner directory under the salt master's file_roots base directory, and create the salt runner script k8s_pki.py in that directory
root@Caden-dev:/etc/salt/master.d# cat opts.conf
cli_summary: true
file_roots:
  base:
    - /srv/salt/container/salt    # base path of file_roots
pillar_roots:
  base:
    - /srv/salt/container/pillar
auto_accept: True
log_level: info
- Create the salt runner script (kubelet as the example)
root@Caden-dev:/srv/salt/container/salt/_runner# cat k8s_pki.py
"""A runner to manage kubernetes certificates

/etc/salt/master.d/k8s_pki.conf
k8s_pki:
  default:
    url: http://127.0.0.1:8200
    mount_path: pki/k8s
    token: <token>

usage:
  salt '<minion>' pillar.items
  salt-run k8s_pki.kubelet '<minion>' test=false force=false context=<default>
"""
from __future__ import absolute_import

import os.path

import requests

import salt
import salt.client
import salt.utils.minions


def _ip_words(ip):
    return tuple(int(w) for w in ip.split('.'))


def _default_ip4(grains):
    # Pick the node's primary IPv4 address, skipping cluster-internal subnets.
    ret = []
    for ip in grains['fqdn_ip4']:
        w = _ip_words(ip)
        if len(w) != 4:
            continue
        if w[:3] == (10, 0, 108):
            continue
        if w[:2] == (10, 244):
            continue
        ret.append(ip)
    if len(ret) != 1:
        raise ValueError('Unable to get default IP from {}, found {}'.format(
            grains['fqdn_ip4'], ret))
    return ret[0]


def _request(method, path, data=None, context='default'):
    cfg = __opts__['k8s_pki'][context]  # fetch the vault parameters for this context
    for k in ['url', 'mount_path']:
        cfg[k] = cfg[k].rstrip('/')
    headers = {'X-Vault-Token': cfg['token']}
    url = '{}/v1/{}/{}'.format(cfg['url'], cfg['mount_path'], path)
    resp = requests.request(method, url, headers=headers, json=data)
    if resp.status_code != requests.codes.ok:
        print(resp.json())
    resp.raise_for_status()
    return resp


def _stat(client, minion_id, path):
    stat = client.cmd(minion_id, 'file.stats', [path])
    ret = stat.get(minion_id, {})
    if not ret:
        return ret
    return {
        'target': ret.get('target'),
        'size': ret.get('size'),
        'user': ret.get('user'),
        'group': ret.get('group'),
    }


def _set_file_props(client, minion_id, path, mode, user, group):
    ret1 = client.cmd(minion_id, 'file.chown', [path, user, group])
    ret2 = client.cmd(minion_id, 'file.set_mode', [path, mode])
    return ret1.get(minion_id), ret2.get(minion_id)


def _write(client, minion_id, path, data, mode, user, group):
    ret = []
    res = client.cmd(minion_id, 'file.write', [path, data])
    ret.append(res.get(minion_id))
    ret += _set_file_props(client, minion_id, path, mode, user, group)
    return ret


def _issue_single(client, minion_id, payload, role, context, cert_dir,
                  cert_basename, user, group, test, force,
                  cert_mode='644', key_mode='400'):
    _, grains, _ = salt.utils.minions.get_minion_data(minion_id, __opts__)
    cert_path = os.path.join(cert_dir, cert_basename + '.crt')
    key_path = os.path.join(cert_dir, cert_basename + '.key')
    cert_stat = _stat(client, minion_id, cert_path)
    key_stat = _stat(client, minion_id, key_path)
    ret = {
        'payload': payload,
        'cert_path': cert_path,
        'key_path': key_path,
        'cert_exists': bool(cert_stat),
        'key_exists': bool(key_stat),
    }
    if test:
        ret.update({
            'success': False,
            'comment': 'test=true',
        })
        return ret
    if not force:
        if cert_stat or key_stat:
            ret.update({
                'success': False,
                'comment': 'cert/key already exists, set force=true.',
            })
            return ret
    # generate crt and key through vault api
    resp = _request('POST', 'issue/' + role.strip('/'), payload, context=context)
    data = resp.json()['data']
    results = []
    # write crt and key to the target minion
    results += _write(client, minion_id, cert_path, data['certificate'],
                      cert_mode, user, group)
    results += _write(client, minion_id, key_path, data['private_key'],
                      key_mode, user, group)
    ret.update({
        'success': True,
        'results': results,
    })
    return ret


def _sans(client, minions, extra_alt_names=(), extra_ip_sans=()):
    alt_names = set()
    ip_sans = set()
    for minion_id in minions:
        _, grains, _ = salt.utils.minions.get_minion_data(minion_id, __opts__)
        alt_names.add(grains['fqdn'])
        if grains['host'] != grains['fqdn']:
            alt_names.add(grains['host'])
        ip_sans.add(_default_ip4(grains))
    alt_names.add('localhost')
    ip_sans.add('127.0.0.1')
    alt_names = alt_names.union(set(extra_alt_names))
    ip_sans = ip_sans.union(set(extra_ip_sans))
    return ','.join(sorted(alt_names)).lower(), ','.join(sorted(ip_sans))


def kubelet(tgt, tgt_type='glob', ttl='8760h', context='default',
            user='root', group='root', cert_dir='/etc/kubernetes/pki',
            test=True, force=False):
    client = salt.client.get_local_client(__opts__['conf_file'])
    minions = client.gather_minions(tgt, tgt_type)
    ret = {}
    for minion_id in sorted(minions):
        alt_names, ip_sans = _sans(client, [minion_id])
        _, grains, _ = salt.utils.minions.get_minion_data(minion_id, __opts__)
        payload = {
            'common_name': 'system:node:{}'.format(grains['host'].lower()),
            'exclude_cn_from_sans': 'true',
            'alt_names': alt_names,
            'ip_sans': ip_sans,
            'ttl': ttl,
        }
        ret[minion_id] = _issue_single(client, minion_id, payload, role='kubelet',
                                       context=context, cert_dir=cert_dir,
                                       cert_basename='kubelet', user=user,
                                       group=group, test=test, force=force)
    return ret
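The runner only wraps Vault's standard PKI issue endpoint, so the role can be sanity-checked without Salt first. A minimal sketch (token, mount path and SAN values are the placeholders and sample values from above):

# Call the same Vault endpoint the runner uses, directly with requests.
import requests

VAULT = 'http://127.0.0.1:8200'
MOUNT = 'pki/k8s-lpt'                       # mount_path of the lpt context
TOKEN = 'xxxxxx-xxxxxx-xxxx-xxxx-xxxxxx'    # root token from vault init

payload = {
    'common_name': 'system:node:k8s-lpt-node02',
    'exclude_cn_from_sans': 'true',
    'alt_names': 'k8s-lpt-node02,localhost',
    'ip_sans': '10.18.14.112,127.0.0.1',
    'ttl': '8760h',
}
resp = requests.post('{}/v1/{}/issue/kubelet'.format(VAULT, MOUNT),
                     headers={'X-Vault-Token': TOKEN}, json=payload)
resp.raise_for_status()
data = resp.json()['data']
print(data['certificate'][:30])   # '-----BEGIN CERTIFICATE-----...'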
-
Import the k8s cluster root CA certificate
Either generate a root CA certificate manually, or take /etc/kubernetes/pki/ca.crt and /etc/kubernetes/pki/ca.key from a k8s-master node of an existing k8s cluster and merge them into a single ca.bundle (see the sketch after the command below), then write the ca.bundle into vault
vault write pki/$mount_path/config/ca pem_bundle=@/$yourpath/ca.bundle
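Merging the two files is plain PEM concatenation; a minimal sketch (paths assume a kubeadm-style master, the output path is arbitrary):

# Concatenate the existing cluster CA cert and key into the PEM bundle
# that `vault write .../config/ca pem_bundle=@...` expects.
with open('/etc/kubernetes/pki/ca.crt') as crt, \
     open('/etc/kubernetes/pki/ca.key') as key, \
     open('/tmp/ca.bundle', 'w') as bundle:
    bundle.write(crt.read().rstrip('\n') + '\n')  # keep a newline between PEM blocks
    bundle.write(key.read())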
-
Run salt-run to generate certificates for the k8s nodes
The run below uses test=true (dry run); for the real run, simply pass test=false
root@Caden-dev:~# salt-run k8s_pki.kubelet k8s-lpt-node02 context=lpt test=true
k8s-lpt-node02:
    ----------
    cert_exists:
        True
    cert_path:
        /etc/kubernetes/pki/kubelet.crt
    comment:
        test=true
    key_exists:
        True
    key_path:
        /etc/kubernetes/pki/kubelet.key
    payload:
        ----------
        alt_names:
            k8s-lpt-node02,localhost
        common_name:
            system:node:k8s-lpt-node02
        exclude_cn_from_sans:
            true
        ip_sans:
            10.18.14.112,127.0.0.1
        ttl:
            8760h
    success:
        False
[INFO    ] Runner completed: 20180805160303808016
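After a real run (test=false) it is worth confirming what Vault actually put into the issued certificate. A sketch that assumes the third-party cryptography package is available on the node:

# Inspect the CN and SANs of the kubelet certificate written by the runner.
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.x509.oid import ExtensionOID, NameOID

with open('/etc/kubernetes/pki/kubelet.crt', 'rb') as f:
    cert = x509.load_pem_x509_certificate(f.read(), default_backend())

print(cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value)
# -> system:node:k8s-lpt-node02
san = cert.extensions.get_extension_for_oid(
    ExtensionOID.SUBJECT_ALTERNATIVE_NAME).value
print(san.get_values_for_type(x509.DNSName))    # ['k8s-lpt-node02', 'localhost']
print([str(ip) for ip in san.get_values_for_type(x509.IPAddress)])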