scp hdfs_ca_key hdfs_ca_cert $host:/tmp
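Since $host is just a placeholder, a minimal loop over the remaining nodes might look like this (the host names below are hypothetical; substitute your own):
# Hypothetical host list; replace with the other nodes in your cluster
for host in ha02 ha03; do
  scp hdfs_ca_key hdfs_ca_cert $host:/tmp
done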
3). After the transfer completes, delete the CA key and certificate on the ha01 node
rm -f hdfs_ca_key hdfs_ca_cert
4). Generate the keystore and truststore on every machine (note: every node in the cluster must run the following commands; a consolidated script is sketched after step 4.7)
4.1) Generate the keystore. keytool requires a Java environment, otherwise you will get "command not found"
name="CN=$HOSTNAME, OU=hlk, O=hlk, L=xian, ST=shanxi, C=CN"
# You will be prompted four times for the password chosen in step 1
keytool -keystore keystore -alias localhost -validity 9999 -genkey -keyalg RSA -keysize 2048 -dname "$name"
4.2) Import the CA certificate into the truststore (you will again be prompted for the password)
keytool -keystore truststore -alias CARoot -import -file hdfs_ca_cert
4.3) Generate a certificate signing request (CSR) from the keystore
keytool -certreq -alias localhost -keystore keystore -file cert
4.4) Sign the CSR with the CA
openssl x509 -req -CA hdfs_ca_cert -CAkey hdfs_ca_key -in cert -out cert_signed -days 9999 -CAcreateserial
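Optionally, verify the signed certificate before importing it:
# The subject should carry this host's CN, the issuer the CA's DN, and the validity span roughly 9999 days
openssl x509 -in cert_signed -noout -subject -issuer -dates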
4.5) Import the CA certificate and the CA-signed certificate into the keystore
keytool -keystore keystore -alias CARoot -import -file hdfs_ca_cert
keytool -keystore keystore -alias localhost -import -file cert_signed
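You can confirm that both entries landed in the keystore (you will be prompted for the keystore password):
# Expect two entries: CARoot (trustedCertEntry) and localhost (PrivateKeyEntry)
keytool -list -keystore keystore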
4.6) Move the final keystore and truststore into a proper directory, adding the .jks suffix
mkdir -p /etc/security/https && chmod 755 /etc/security/https
cp keystore /etc/security/https/keystore.jks
cp truststore /etc/security/https/truststore.jks
4.7) Clean up the intermediate files created under /tmp
rm -f keystore truststore hdfs_ca_key hdfs_ca_cert.srl hdfs_ca_cert cert_signed cert
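Because steps 4.1-4.7 must run on every node, it is convenient to collect them into one script. The following is a minimal, non-interactive sketch, assuming the password chosen in step 1 was hadoop and that the CA key uses the same password (both assumptions; adjust PASS to your own setup):
#!/bin/bash
# setup_node_ssl.sh -- hypothetical helper; run on each node after the CA files arrive in /tmp
set -e
cd /tmp
PASS=hadoop   # assumed password; must match the one chosen in step 1
name="CN=$HOSTNAME, OU=hlk, O=hlk, L=xian, ST=shanxi, C=CN"
keytool -keystore keystore -alias localhost -validity 9999 -genkey -keyalg RSA -keysize 2048 -dname "$name" -storepass "$PASS" -keypass "$PASS"
keytool -keystore truststore -alias CARoot -import -file hdfs_ca_cert -storepass "$PASS" -noprompt
keytool -certreq -alias localhost -keystore keystore -file cert -storepass "$PASS"
# -passin assumes the CA key password equals $PASS; omit it to be prompted instead
openssl x509 -req -CA hdfs_ca_cert -CAkey hdfs_ca_key -in cert -out cert_signed -days 9999 -CAcreateserial -passin "pass:$PASS"
keytool -keystore keystore -alias CARoot -import -file hdfs_ca_cert -storepass "$PASS" -noprompt
keytool -keystore keystore -alias localhost -import -file cert_signed -storepass "$PASS"
mkdir -p /etc/security/https && chmod 755 /etc/security/https
cp keystore /etc/security/https/keystore.jks
cp truststore /etc/security/https/truststore.jks
rm -f keystore truststore hdfs_ca_key hdfs_ca_cert.srl hdfs_ca_cert cert_signed cert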
5). Configure the $HADOOP_HOME/etc/hadoop/ssl-server.xml and ssl-client.xml files
Note: configure these two files on one node, then copy them to the corresponding location on every other node!
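For example, with hypothetical host names ha02 and ha03 (assuming the same $HADOOP_HOME path on every node):
# Push both SSL config files to every other node
for host in ha02 ha03; do
  scp $HADOOP_HOME/etc/hadoop/ssl-server.xml $HADOOP_HOME/etc/hadoop/ssl-client.xml $host:$HADOOP_HOME/etc/hadoop/
done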
5.1) Configure the $HADOOP_HOME/etc/hadoop/ssl-client.xml file
################################ ssl-client.xml #########################################
<configuration>

<property>
  <name>ssl.client.truststore.location</name>
  <value>/etc/security/https/truststore.jks</value>
  <description>Truststore to be used by clients like distcp. Must be specified.</description>
</property>

<property>
  <name>ssl.client.truststore.password</name>
  <value>hadoop</value>
  <description>Optional. Default value is "".</description>
</property>

<property>
  <name>ssl.client.truststore.type</name>
  <value>jks</value>
  <description>Optional. The keystore file format, default value is "jks".</description>
</property>
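A quick sanity check that the password configured above actually opens the installed truststore:
# Should list the CARoot entry; a wrong password in ssl-client.xml fails here
keytool -list -keystore /etc/security/https/truststore.jks -storepass hadoop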