
Hadoop Kerberos Authentication

Table of Contents

  • Kerberos Authentication
    • Environment
    • Time Synchronization
    • Kerberos Deployment
      • Client installation (install on every node)
      • Server installation (hadoop02 node)
      • krb5.conf configuration (configure on every node)
      • kdc.conf configuration (hadoop02 only)
      • ACL configuration (hadoop02 only)
      • Initialize the database (hadoop02 only)
      • Start the Kerberos services (hadoop02 only)
      • Create the Kerberos admin user and principals (hadoop02 only)
      • Test the service from client nodes
    • Integrating Hadoop with Kerberos
      • core-site.xml (hadoop02 only)
      • hdfs-site.xml (execute on all nodes)
      • yarn-site.xml (hadoop02 only)
      • mapred-site.xml (hadoop02 only)
      • Distribute the configuration
    • Configure HDFS to use HTTPS for secure transport
      • 1. Generate the key pair
      • 2. Change keystore permissions
      • 3. Edit the Hadoop config file ssl-server.xml.example
      • 4. Distribute the certificate and configuration to the same path on every node
    • Service Verification

Kerberos Authentication

Environment

# If any of the information below changes, do a global search-and-replace; hadoop02 is the master node here
# Current server IPs and hostname aliases:
192.168.30.85   hadoop01
192.168.30.86   hadoop02
192.168.30.87   hadoop03
# Create the software installation directory if it does not exist; if you change it to /opt/pkg, globally replace /opt/software with /opt/pkg throughout this document
mkdir -p /opt/software
# hadoop_home
/opt/software/hadoop-3.3.1
# The current account is root; for another user, e.g. lxapp, globally replace root@ with lxapp@

Time Synchronization

# Check the time on each node
date
# If the clocks differ too much, reset the time with date -s
date -s "20230421 14:59:30"
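
A one-off date -s setting will drift again over time. As a minimal sketch, ongoing synchronization can be handled with chrony (assuming the chrony package and a reachable NTP source, both of which depend on your environment):

# Install and enable chrony on every node (assumes yum repositories are reachable)
yum install -y chrony
systemctl enable --now chronyd
# Verify that a source is selected; the line marked ^* is the synced source
chronyc sources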

Kerberos Deployment

# The hadoop02 node acts as the server
# Check whether the dependency packages are already installed
rpm -qa | grep krb5
# Download URLs for the dependencies and installation packages
http://mirror.centos.org/centos/7/os/x86_64/Packages/libevent-2.0.21-4.el7.x86_64.rpm
http://mirror.centos.org/centos/7/os/x86_64/Packages/libverto-libevent-0.2.5-4.el7.x86_64.rpm
http://mirror.centos.org/centos/7/os/x86_64/Packages/words-3.0-22.el7.noarch.rpm
http://mirror.centos.org/centos/7/os/x86_64/Packages/libkadm5-1.15.1-50.el7.x86_64.rpm
http://mirror.centos.org/centos/7/os/x86_64/Packages/krb5-libs-1.15.1-50.el7.x86_64.rpm
http://mirror.centos.org/centos/7/os/x86_64/Packages/krb5-workstation-1.15.1-50.el7.x86_64.rpm
http://mirror.centos.org/centos/7/os/x86_64/Packages/krb5-server-1.15.1-50.el7.x86_64.rpm
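
To fetch all of the packages in one go, a minimal sketch (assuming the nodes can reach mirror.centos.org; pkgs.txt is a hypothetical file holding the URLs above, one per line):

# Download every package listed in pkgs.txt into /opt/software
wget -i pkgs.txt -P /opt/software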

Client installation (install on every node)

rpm -ivh libevent-2.0.21-4.el7.x86_64.rpm
rpm -ivh libverto-libevent-0.2.5-4.el7.x86_64.rpm
rpm -ivh words-3.0-22.el7.noarch.rpm
rpm -ivh krb5-libs-1.15.1-50.el7.x86_64.rpm
rpm -ivh libkadm5-1.15.1-50.el7.x86_64.rpm
rpm -ivh krb5-workstation-1.15.1-50.el7.x86_64.rpm

Server installation (hadoop02 node)

rpm -ivh krb5-server-1.15.1-50.el7.x86_64.rpm

krb5.conf configuration (configure on every node)

# Using hadoop02 as the example; once configured, the file can be distributed to the other nodes
vim /etc/krb5.conf
# Configuration snippets may be placed in this directory as well
includedir /etc/krb5.conf.d/

[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log

[libdefaults]
dns_lookup_realm = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
rdns = false
pkinit_anchors = FILE:/etc/pki/tls/certs/ca-bundle.crt
default_realm = HADOOP.COM
# default_ccache_name must stay commented out, otherwise 'hadoop fs -ls /' fails with:
# org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]
#default_ccache_name = KEYRING:persistent:%{uid}
udp_preference_limit = 1

[realms]
# Realm name
HADOOP.COM = {
 # The KDC (key distribution center), i.e. the Kerberos server: the hadoop02 node
 kdc = hadoop02
 admin_server = hadoop02
}

[domain_realm]
# Both example.com and .example.com map to HADOOP.COM
.example.com = HADOOP.COM
example.com = HADOOP.COM

# Distribute the finished krb5.conf to the other nodes
scp -r /etc/krb5.conf root@hadoop01:/etc/
scp -r /etc/krb5.conf root@hadoop03:/etc/

kdc.conf configuration (hadoop02 only)

vim /var/kerberos/krb5kdc/kdc.conf
[kdcdefaults]
 kdc_ports = 88
 kdc_tcp_ports = 88

[realms]
EXAMPLE.COM = {
 #master_key_type = aes256-cts
 acl_file = /var/kerberos/krb5kdc/kadm5.acl
 dict_file = /usr/share/dict/words
 admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
 supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal camellia256-cts:normal camellia128-cts:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
}
# Note: the only line that needs to change is the last one, supported_enctypes.
# Remove aes256-cts:normal from it; keeping it would require importing an extra
# package into Java (the JCE unlimited-strength policy files).

ACL configuration (hadoop02 only)

vim /var/kerberos/krb5kdc/kadm5.acl
*/admin@HADOOP.COM *
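
This single rule grants full administrative privileges to every principal whose instance is admin, e.g. the admin/admin@HADOOP.COM user created below.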

Initialize the database (hadoop02 only)

# Set the database master password (here: krb5kdc)
kdb5_util create -r HADOOP.COM -s
# Check the generated files
ll /var/kerberos/krb5kdc/
kadm5.acl
kdc.conf
principal
principal.kadm5
principal.kadm5.lock
principal.ok

Start the Kerberos services (hadoop02 only)

# Start the KDC and enable it at boot
systemctl start krb5kdc
systemctl enable krb5kdc
# Start kadmin, the access entry point to the KDC database
systemctl start kadmin
systemctl enable kadmin
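
To confirm both daemons came up and the KDC is listening on port 88, a quick check (ss ships with the iproute package, which is assumed to be installed):

systemctl status krb5kdc kadmin
# The KDC should be listening on TCP and UDP port 88
ss -lntu | grep ':88'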

Create the Kerberos admin user and principals (hadoop02 only)

# Create the admin administrator user
kadmin.local -q "addprinc admin/admin@HADOOP.COM"
# Create the service principal; -randkey generates a random password, which is fine because
# the Hadoop services all authenticate with keytab files
kadmin.local -q "addprinc -randkey test/test"
# Export the principal's key into a keytab file
mkdir -p /opt/software/security/keytab
kadmin.local -q "xst -k /opt/software/security/keytab/test.keytab test/test"
# Set permissions on the keytab directory
chmod 770 /opt/software/security/keytab/
chmod 660 /opt/software/security/keytab/*
# Distribute the keytab to the other nodes (create the directory there first if it does not exist)
mkdir -p /opt/software/security/keytab
chmod 770 /opt/software/security/keytab/
scp -r /opt/software/security/keytab/* root@hadoop01:/opt/software/security/keytab/
scp -r /opt/software/security/keytab/* root@hadoop03:/opt/software/security/keytab/
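
To verify the keytab on each node after distribution, list its entries (klist -kt prints the key version number and principal of every entry):

klist -kt /opt/software/security/keytab/test.keytab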

Test the service from client nodes

# Interactive kadmin.local shell
kadmin.local
# List principals (listprincs)
kadmin.local -q "listprincs"
# Verify the service with any one of the following
# Password authentication: enter the password set for admin/admin when the principal was created
kinit admin/admin
# Remote kadmin connection, authenticated with the same password
kadmin -p admin/admin
# Keytab authentication
kinit -kt /opt/software/security/keytab/test.keytab test/test

Integrating Hadoop with Kerberos

# Steps marked "all nodes" below must be executed on every node (their file contents can differ per node); everything else is executed only on the hadoop02 master node and distributed afterwards

core-site.xml (hadoop02 only)

vim /opt/software/hadoop-3.3.1/etc/hadoop/core-site.xml
<!-- Enable Kerberos authentication for the Hadoop cluster -->
<property>
  <name>hadoop.security.authentication</name>
  <value>kerberos</value>
</property>
<!-- Enable Hadoop cluster authorization -->
<property>
  <name>hadoop.security.authorization</name>
  <value>true</value>
</property>
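
Since every service here shares the single test/test principal, how Hadoop maps it to a local short name matters. A hedged sketch of an auth_to_local rule mapping test/test@HADOOP.COM to the local root account (the rule and the target user are assumptions; the built-in DEFAULT rule may already suffice for your setup):

<!-- Sketch only: map test/test@HADOOP.COM to the local root user (assumption) -->
<property>
  <name>hadoop.security.auth_to_local</name>
  <value>
    RULE:[2:$1/$2@$0](test/test@HADOOP.COM)s/.*/root/
    DEFAULT
  </value>
</property>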

hdfs-site.xml (execute on all nodes)

vim /opt/software/hadoop-3.3.1/etc/hadoop/hdfs-site.xml
<!-- Kerberos principal for the NameNode service; a _HOST placeholder in the principal would resolve automatically to the local hostname -->
<property>
  <name>dfs.namenode.kerberos.principal</name>
  <value>test/test@HADOOP.COM</value>
</property>
<!-- Keytab file path for the NameNode service -->
<property>
  <name>dfs.namenode.keytab.file</name>
  <value>/opt/software/security/keytab/test.keytab</value>
</property>
<!-- Keytab file path for the Secondary NameNode service -->
<property>
  <name>dfs.secondary.namenode.keytab.file</name>
  <value>/opt/software/security/keytab/test.keytab</value>
</property>
<!-- Kerberos principal for the Secondary NameNode service -->
<property>
  <name>dfs.secondary.namenode.kerberos.principal</name>
  <value>test/test@HADOOP.COM</value>
</property>
<!-- Require Kerberos-backed block access tokens when accessing DataNode blocks -->
<property>
  <name>dfs.block.access.token.enable</name>
  <value>true</value>
</property>
<!-- Kerberos principal for the DataNode service -->
<property>
  <name>dfs.datanode.kerberos.principal</name>
  <value>test/test@HADOOP.COM</value>
</property>
<!-- Keytab file path for the DataNode service -->
<property>
  <name>dfs.datanode.keytab.file</name>
  <value>/opt/software/security/keytab/test.keytab</value>
</property>
<!-- DataNode data transfer protection policy: authentication only -->
<property>
  <name>dfs.data.transfer.protection</name>
  <value>authentication</value>
</property>
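
The _HOST placeholder mentioned above relates to why this file is marked for all nodes: with the shared test/test principal shown here the file happens to be identical everywhere, but per-host service principals (e.g. a hypothetical nn/hadoop02@HADOOP.COM) would make the contents host-specific unless the principal is written once with _HOST and resolved by each node:

<!-- Sketch only, assuming per-host principals such as nn/<hostname>@HADOOP.COM exist -->
<property>
  <name>dfs.namenode.kerberos.principal</name>
  <value>nn/_HOST@HADOOP.COM</value>
</property>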

yarn-site.xml (hadoop02 only)

vim /opt/software/hadoop-3.3.1/etc/hadoop/yarn-site.xml
<!-- Kerberos principal for the ResourceManager service -->
<property>
  <name>yarn.resourcemanager.principal</name>
  <value>test/test@HADOOP.COM</value>
</property>
<!-- Keytab file for the ResourceManager service -->
<property>
  <name>yarn.resourcemanager.keytab</name>
  <value>/opt/software/security/keytab/test.keytab</value>
</property>
<!-- Kerberos principal for the NodeManager service -->
<property>
  <name>yarn.nodemanager.principal</name>
  <value>test/test@HADOOP.COM</value>
</property>
<!-- Keytab file for the NodeManager service -->
<property>
  <name>yarn.nodemanager.keytab</name>
  <value>/opt/software/security/keytab/test.keytab</value>
</property>

mapred-site.xml (hadoop02 only)

vim /opt/software/hadoop-3.3.1/etc/hadoop/mapred-site.xml
<!-- Keytab file for the JobHistory server -->
<property>
  <name>mapreduce.jobhistory.keytab</name>
  <value>/opt/software/security/keytab/test.keytab</value>
</property>
<!-- Kerberos principal for the JobHistory server -->
<property>
  <name>mapreduce.jobhistory.principal</name>
  <value>test/test@HADOOP.COM</value>
</property>

Distribute the configuration

scp -r /opt/software/hadoop-3.3.1/etc/hadoop/core-site.xml root@hadoop01:/opt/software/hadoop-3.3.1/etc/hadoop/
scp -r /opt/software/hadoop-3.3.1/etc/hadoop/core-site.xml root@hadoop03:/opt/software/hadoop-3.3.1/etc/hadoop/
scp -r /opt/software/hadoop-3.3.1/etc/hadoop/yarn-site.xml root@hadoop01:/opt/software/hadoop-3.3.1/etc/hadoop/
scp -r /opt/software/hadoop-3.3.1/etc/hadoop/yarn-site.xml root@hadoop03:/opt/software/hadoop-3.3.1/etc/hadoop/
scp -r /opt/software/hadoop-3.3.1/etc/hadoop/mapred-site.xml root@hadoop01:/opt/software/hadoop-3.3.1/etc/hadoop/
scp -r /opt/software/hadoop-3.3.1/etc/hadoop/mapred-site.xml root@hadoop03:/opt/software/hadoop-3.3.1/etc/hadoop/

Configure HDFS to use HTTPS for secure transport

  • Execute on the hadoop02 node

1. Generate the key pair

mkdir -p /opt/software/security/https
keytool -keystore /opt/software/security/https/keystore -alias jetty -genkey -keyalg RSA
Enter keystore password:  feisuan
Re-enter new password:  feisuan
# The following fields can be left blank; just press Enter
What is your first and last name? [Unknown]:
What is the name of your organizational unit? [Unknown]:
What is the name of your organization? [Unknown]:
What is the name of your City or Locality? [Unknown]:
What is the name of your State or Province? [Unknown]:
What is the two-letter country code for this unit? [Unknown]:
Is CN=Unknown, OU=Unknown, O=Unknown, L=Unknown, ST=Unknown, C=Unknown correct? []:  y
Enter key password for <jetty>
(RETURN if same as keystore password):
Re-enter new password:
# Inspect the keystore contents
keytool -keystore /opt/software/security/https/keystore -list

2. Change keystore permissions

chmod 660 /opt/software/security/https/keystore

3. Edit the Hadoop config file ssl-server.xml.example

cp /opt/software/hadoop-3.3.1/etc/hadoop/ssl-server.xml.example /opt/software/hadoop-3.3.1/etc/hadoop/ssl-server.xml
vim /opt/software/hadoop-3.3.1/etc/hadoop/ssl-server.xml
<!-- SSL keystore path -->
<property>
  <name>ssl.server.keystore.location</name>
  <value>/opt/software/security/https/keystore</value>
</property>
<!-- SSL keystore password -->
<property>
  <name>ssl.server.keystore.password</name>
  <value>feisuan</value>
</property>
<!-- SSL truststore path -->
<property>
  <name>ssl.server.truststore.location</name>
  <value>/opt/software/security/https/keystore</value>
</property>
<!-- Password of the key inside the SSL keystore -->
<property>
  <name>ssl.server.keystore.keypassword</name>
  <value>feisuan</value>
</property>
<!-- SSL truststore password -->
<property>
  <name>ssl.server.truststore.password</name>
  <value>feisuan</value>
</property>
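
ssl-server.xml on its own does not switch the web endpoints over to HTTPS; typically dfs.http.policy must also be set in hdfs-site.xml. A sketch (HTTPS_ONLY is one supported value; HTTP_AND_HTTPS keeps both endpoints, depending on your needs):

<!-- In hdfs-site.xml: serve the HDFS web UIs over HTTPS only -->
<property>
  <name>dfs.http.policy</name>
  <value>HTTPS_ONLY</value>
</property>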

4. Distribute the certificate and configuration to the same path on every node

# Create the directory first if it does not exist
mkdir -p /opt/software/security/https
scp -r /opt/software/security/https/keystore root@hadoop01:/opt/software/security/https
scp -r /opt/software/security/https/keystore root@hadoop03:/opt/software/security/https
scp -r /opt/software/hadoop-3.3.1/etc/hadoop/ssl-server.xml root@hadoop01:/opt/software/hadoop-3.3.1/etc/hadoop/
scp -r /opt/software/hadoop-3.3.1/etc/hadoop/ssl-server.xml root@hadoop03:/opt/software/hadoop-3.3.1/etc/hadoop/

Service Verification

# keytab authentication
kinit -kt /opt/software/security/keytab/test.keytab test/test
# Show the current credentials
klist
# Browse the HDFS directory tree to confirm normal operation
$HADOOP_HOME/bin/hadoop fs -ls /
# Destroy the credentials
kdestroy
# Browse the HDFS directory tree again after destroying the credentials
$HADOOP_HOME/bin/hadoop fs -ls /
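
With the credentials destroyed, the second listing should fail with the AccessControlException quoted in the krb5.conf section (Client cannot authenticate via:[TOKEN, KERBEROS]), confirming that Kerberos is actually being enforced.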