1. Software Versions

This guide covers installing and configuring a single-node big data environment and is intended for beginners.


Software	Installation package
MySQL	mysql57-community-release-el7-9.noarch.rpm
JDK	jdk-8u171-linux-x64.tar.gz
Hadoop	hadoop-3.1.3.tar.gz
Hive	apache-hive-3.1.2-bin.tar.gz
Zeppelin	zeppelin-0.8.2-bin-all.tgz
Zookeeper	apache-zookeeper-3.5.7-bin.tar.gz
HBase	hbase-2.3.5-bin.tar.gz
Sqoop	sqoop-1.4.6.bin__hadoop-2.0.4-alpha.tar.gz
Flume	apache-flume-1.9.0-bin.tar.gz
Scala	scala-2.12.10.tgz
Spark	spark-3.1.2-bin-hadoop3.2.tgz
Kafka	kafka_2.12-2.8.0.tgz

All packages have been uploaded to Baidu Netdisk; download them if you need them.
Link: https://pan.baidu.com/s/1PLb6pBcnemUJLI3uLdY4Ag
Extraction code: hyye

#Create /opt/download to hold the installation packages and /opt/software as the installation directory
mkdir /opt/download
mkdir /opt/software

2. MySQL Installation and Configuration

2.1 Install MySQL

#MySQL is installed via yum here
#1.Download the MySQL yum repository package
wget https://dev.mysql.com/get/mysql57-community-release-el7-9.noarch.rpm
#2.Install the repository
rpm -ivh mysql57-community-release-el7-9.noarch.rpm
#3.Install the MySQL server
yum -y install mysql-community-server

2.2 Configure the character set

vim /etc/my.cnf

Add the following settings to the file (merge them with any existing [mysqld] entries):

[client]
default-character-set = utf8mb4

[mysql]
default-character-set = utf8mb4

[mysqld]
character-set-client-handshake = FALSE
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci

2.3 Change the password

#Get the temporary password from the log
grep 'temporary password' /var/log/mysqld.log
2021-05-25T04:15:11.018065Z 1 [Note] A temporary password is generated for root@localhost: AzcyCnsul3+-
#Log in to MySQL with the temporary password (enter it when prompted)
mysql -u root -p
#If no password can be found in /var/log/mysqld.log,
#set up passwordless login as follows
#1.Stop the MySQL service
systemctl stop mysqld
#2.Edit the config file to enable passwordless login
vim /etc/my.cnf
[mysqld]
skip-grant-tables
#3.Start the MySQL service
systemctl start mysqld
#4.Log in without a password
mysql # press Enter to log in
#5.Immediately change the root password, then exit MySQL
use mysql;
update user set authentication_string=password('1234')
where user = 'root';
exit;
#6.Stop MySQL again, remove the skip-grant-tables line, then start MySQL;
# you can now log in with the password set above
#7.Before doing anything else you must set a password that meets the policy requirements
alter user 'root'@'localhost' identified by '@12A3a4';
#7.1.Lower the password policy level
set global validate_password_policy=0;
flush privileges;
#7.2.Change the password again; the minimum length is 8
alter user 'root'@'localhost' identified by '12345678';
#8.Enable remote access
grant all on *.* to root@'%' identified by '12345678';
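
A quick sanity check (a sketch; it assumes the password 12345678 set above):

mysql -uroot -p12345678 -e "select user,host from mysql.user where user='root';"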


3. JDK Installation and Configuration

3.1 Installation

#1.Switch to the package directory
cd /opt/download
#2.Extract the JDK into /opt/software
tar -zxvf jdk-8u171-linux-x64.tar.gz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/jdk1.8.0_171 /opt/software/jdk

3.2 Configure environment variables

#1.Configure the environment variables
#/etc/profile is the main environment file [configuring it directly is not recommended]
#put the variables in /etc/profile.d/myenv.sh instead
vim /etc/profile.d/myenv.sh

#jdk
export JAVA_HOME=/opt/software/jdk
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

#2.Activate the environment variables
source /etc/profile
#Check that the variable is set
echo $JAVA_HOME
#If it prints /opt/software/jdk, the environment variables are active
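
To confirm the JDK itself is on the PATH (a sketch; the output should report version 1.8.0_171):

java -version
javac -version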

4. Hadoop Installation and Configuration

4.1 Before installing Hadoop, set up passwordless SSH to this machine

cd ~
ssh-keygen -t rsa
#append the public key to authorized_keys
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
#passwordless login (the hostname can be checked with the hostname command)
ssh root@<hostname>
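
Alternatively, ssh-copy-id performs the append step in one command (a sketch):

ssh-copy-id root@$(hostname)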

4.2 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract Hadoop into /opt/software
tar -zxvf hadoop-3.1.3.tar.gz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/hadoop-3.1.3 /opt/software/hadoop313

4.3 Configure environment variables

vim /etc/profile.d/myenv.sh

#hadoop
export HADOOP_HOME=/opt/software/hadoop313
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export HADOOP_YARN_HOME=$HADOOP_HOME
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native:$JAVA_LIBRARY_PATH
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop


#Activate the environment variables
source /etc/profile
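
A quick way to confirm the variables took effect (a sketch):

hadoop version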

4.4 Configuration files

cd /opt/software/hadoop313/
mkdir data
cd /opt/software/hadoop313/etc/hadoop/

Edit core-site.xml

<configuration>
	<!--NameNode address-->
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://192.168.64.180:9820</value>
	</property>
	<!--Data storage directory-->
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/opt/software/hadoop313/data</value>
	</property>
	<!--Static user for HDFS web UI logins-->
	<property>
		<name>hadoop.http.staticuser.user</name>
		<value>root</value>
	</property>
	<!--Hosts from which the root superuser may proxy-->
	<property>
		<name>hadoop.proxyuser.root.hosts</name>
		<value>*</value>
	</property>
	<!--Groups the root superuser may proxy-->
	<property>
		<name>hadoop.proxyuser.root.groups</name>
		<value>*</value>
	</property>
	<!--Users the root superuser may proxy-->
	<property>
		<name>hadoop.proxyuser.root.users</name>
		<value>*</value>
	</property>
</configuration>

Edit hdfs-site.xml

<configuration>
	<!--NameNode web UI address-->
	<property>
		<name>dfs.namenode.http-address</name>
		<value>192.168.64.180:9870</value>
	</property>
	<!--Secondary NameNode web UI address-->
	<property>
		<name>dfs.namenode.secondary.http-address</name>
		<value>192.168.64.180:9868</value>
	</property>
	<!--HDFS replication factor-->
	<property>
		<name>dfs.replication</name>
		<value>1</value>
	</property>
</configuration>

Edit yarn-site.xml

<configuration>
	<!--Shuffle service for MapReduce-->
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>
	<!--ResourceManager hostname-->
	<property>
		<name>yarn.resourcemanager.hostname</name>
		<value>singlechen</value>
	</property>
	<!--Environment variables inherited by containers-->
	<property>
		<name>yarn.nodemanager.env-whitelist</name>
		<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
	</property>
	<!--Minimum memory a YARN container may be allocated-->
	<property>
		<name>yarn.scheduler.minimum-allocation-mb</name>
		<value>256</value>
	</property>
	<!--Maximum memory a YARN container may be allocated-->
	<property>
		<name>yarn.scheduler.maximum-allocation-mb</name>
		<value>1536</value>
	</property>
	<!--Physical memory the NodeManager may manage-->
	<property>
		<name>yarn.nodemanager.resource.memory-mb</name>
		<value>1536</value>
	</property>
	<property>
		<name>yarn.app.mapreduce.am.resource.mb</name>
		<value>256</value>
	</property>
	<!--Disable YARN's physical memory limit check;
		the JDK on CentOS 6+ can report inflated virtual memory-->
	<property>
		<name>yarn.nodemanager.pmem-check-enabled</name>
		<value>false</value>
	</property>
	<!--Disable the virtual memory limit check as well-->
	<property>
		<name>yarn.nodemanager.vmem-check-enabled</name>
		<value>false</value>
	</property>
	<!--Enable log aggregation-->
	<property>
		<name>yarn.log-aggregation-enable</name>
		<value>true</value>
	</property>
	<!--Log aggregation server address-->
	<property>
		<name>yarn.log.server.url</name>
		<value>http://192.168.64.180:19888/jobhistory/logs</value>
	</property>
	<!--Keep aggregated logs for 7 days-->
	<property>
		<name>yarn.log-aggregation.retain-seconds</name>
		<value>604800</value>
	</property>
	<property>
		<name>yarn.application.classpath</name>
		<value>paste the copied Hadoop classpath here</value>
	</property>
</configuration>
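
The value of yarn.application.classpath should be the single long line printed by the hadoop classpath command, copied verbatim into the <value> element above. A sketch:

hadoop classpath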

Edit mapred-site.xml

<configuration>
	<!--Run MapReduce on YARN; the default is local, and mesos (covered later with Spark) is another option-->
	<property>
		<name>mapreduce.framework.name</name>
		<value>yarn</value>
	</property>
	<!--JobHistory server address-->
	<property>
		<name>mapreduce.jobhistory.address</name>
		<value>192.168.64.180:10020</value>
	</property>
	<!--JobHistory server web UI address-->
	<property>
		<name>mapreduce.jobhistory.webapp.address</name>
		<value>192.168.64.180:19888</value>
	</property>
	<property>
		<name>yarn.app.mapreduce.am.env</name>
		<value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
	</property>
	<property>
		<name>mapreduce.map.env</name>
		<value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
	</property>
	<property>
		<name>mapreduce.reduce.env</name>
		<value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
	</property>
</configuration>

4.5 Format the NameNode

cd /opt/software/hadoop313/bin
./hdfs namenode -format

4.6 Start the services

start-dfs.sh
start-yarn.sh
	
#Check the running services with the jps command
[root@singlechen hadoop]# jps
4562 NodeManager
5490 Jps
4132 SecondaryNameNode
3881 DataNode
3739 NameNode
4412 ResourceManager
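
A quick smoke test (a sketch; the HDFS web UI is at http://192.168.64.180:9870 and the YARN UI is normally at http://192.168.64.180:8088):

hdfs dfs -mkdir -p /tmp/test
hdfs dfs -put /etc/hosts /tmp/test/
hdfs dfs -ls /tmp/test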

5. Hive Installation and Configuration

5.1 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract Hive into /opt/software
tar -zxvf apache-hive-3.1.2-bin.tar.gz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/apache-hive-3.1.2-bin/ /opt/software/hive312

5.2 Configure environment variables

vim /etc/profile.d/myenv.sh

export HIVE_HOME=/opt/software/hive312
export PATH=$HIVE_HOME/bin:$PATH

#Activate the environment variables
source /etc/profile

5.3 Edit the configuration files

cd /opt/software/hive312/conf
mv hive-default.xml.template hive-site.xml

Edit hive-site.xml

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost:3306/hive312?createDatabaseIfNotExist=true</value>
    <description>connect to mysql for hive metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>driver for mysql</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
    <description>username to mysql</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>12345678</value>
    <description>password to mysql</description>
  </property>
</configuration>

5.4 Copy dependency jars

cd /opt/software/hive312/lib
#Locate the guava jar shipped with Hadoop
find /opt/software/hadoop313/ -name 'guava*.jar'
#Remove Hive's older guava, then copy Hadoop's guava into Hive's lib directory
rm -f guava-19.0.jar
cp /opt/software/hadoop313/share/hadoop/common/lib/guava-27.0-jre.jar ./
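
Hive also needs the MySQL JDBC driver on its classpath before the metastore can be initialized. A sketch, assuming the connector jar was downloaded to /opt/download:

cp /opt/download/mysql-connector-java-5.1.47.jar /opt/software/hive312/lib/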

5.5 Initialize the metastore

cd /opt/software/hive312/bin
./schematool -dbType mysql -initSchema

5.6 Start the services

nohup hive --service metastore>/dev/null 2>&1 &
nohup hive --service hiveserver2>/dev/null 2>&1 &

5.7 Start the client

beeline -u jdbc:hive2://192.168.64.180:10000
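
Once beeline can connect, a quick sanity check (a sketch):

beeline -u jdbc:hive2://192.168.64.180:10000 -e "show databases;"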

6. Zeppelin Installation and Configuration

6.1 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract Zeppelin into /opt/software
tar -zxvf zeppelin-0.8.2-bin-all.tgz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/zeppelin-0.8.2-bin-all /opt/software/zeppelin082

6.2 Configure environment variables

vim /etc/profile.d/myenv.sh

#zeppelin
export ZEPPELIN_HOME=/opt/software/zeppelin082
export PATH=$ZEPPELIN_HOME/bin:$PATH

#Activate the environment variables
source /etc/profile

6.3 Edit the configuration files

cd /opt/software/zeppelin082/conf
mv zeppelin-env.sh.template zeppelin-env.sh
mv zeppelin-site.xml.template zeppelin-site.xml

Edit zeppelin-env.sh

export JAVA_HOME=/opt/software/jdk
export HADOOP_CONF_DIR=/opt/software/hadoop313/etc/hadoop

Edit zeppelin-site.xml

<property>
	<name>zeppelin.server.addr</name>
	<value>192.168.64.180</value>
	<description>Server binding address</description>
</property>
<property>
	<name>zeppelin.server.port</name>
	<value>8000</value>
	<description>Server port.</description>
</property>

6.4 Adjust permissions on the HDFS directory used by Hive

hdfs dfs -chmod -R 777 /tmp

6.5 Configure the Hive interpreter

#Copy hive-site.xml into Zeppelin's conf directory
cd /opt/software/zeppelin082/conf
cp /opt/software/hive312/conf/hive-site.xml ./
#Copy the required jars
cd /opt/software/zeppelin082/interpreter/jdbc/
cp /opt/software/hadoop313/share/hadoop/common/hadoop-common-3.1.3.jar ./
cp /opt/software/hive312/lib/hive-jdbc-3.1.2.jar ./
cp /opt/software/hive312/lib/hive-common-3.1.2.jar ./
cp /opt/software/hive312/lib/hive-serde-3.1.2.jar ./
cp /opt/software/hive312/lib/hive-service-rpc-3.1.2.jar ./
cp /opt/software/hive312/lib/hive-service-3.1.2.jar ./
cp /opt/software/hive312/lib/curator-client-2.12.0.jar ./

6.6 Start the service

zeppelin-daemon.sh start
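
To confirm Zeppelin is up, the web UI should respond on the address and port configured above (a sketch):

curl -s -o /dev/null -w "%{http_code}\n" http://192.168.64.180:8000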

7. Zookeeper Installation

7.1 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract Zookeeper into /opt/software
tar -zxvf apache-zookeeper-3.5.7-bin.tar.gz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/apache-zookeeper-3.5.7-bin /opt/software/zookeeper357

7.2 Configure environment variables

vim /etc/profile.d/myenv.sh

#zookeeper
export ZOOKEEPER_HOME=/opt/software/zookeeper357
export PATH=$ZOOKEEPER_HOME/bin:$PATH

#Activate the environment variables
source /etc/profile

7.3 Create the data directory

cd /opt/software/zookeeper357
mkdir mydata
vim mydata/myid	#in a cluster, the myid on each node must be unique (1~255)

---------------
1
---------------

7.4 Edit the configuration file

cd /opt/software/zookeeper357/conf
mv zoo_sample.cfg zoo.cfg
vim zoo.cfg
-------------------------------------------
dataDir=/opt/software/zookeeper357/mydata
server.1=singlehenry:2888:3888
#in a cluster, list every node
server.n=who:2888:3888
-------------------------------------------

7.5 Start/stop the service

zkServer.sh start|status|stop
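
To verify the server is serving requests (a sketch):

zkServer.sh status
zkCli.sh -server 127.0.0.1:2181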

8. HBase Installation and Configuration

8.1 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract HBase into /opt/software
tar -zxvf hbase-2.3.5-bin.tar.gz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/hbase-2.3.5 /opt/software/hbase235

8.2 Configure environment variables

vim /etc/profile.d/myenv.sh

#hbase
export HBASE_HOME=/opt/software/hbase235
export PATH=$HBASE_HOME/bin:$PATH

#Activate the environment variables
source /etc/profile

8.3 Edit the configuration files

cd /opt/software/hbase235/conf

Edit hbase-env.sh

export JAVA_HOME=/opt/software/jdk
export HBASE_MANAGES_ZK=false	#do not use HBase's bundled zookeeper

Edit hbase-site.xml

<property>
	<name>hbase.rootdir</name>
	<value>hdfs://singlehenry:9820/hbase</value>
</property>
<property>
	<name>hbase.cluster.distributed</name>
	<value>true</value>
</property>
<property>
	<name>hbase.tmp.dir</name>
	<value>/opt/software/hbase235/tmp</value>
</property>
<!--in a cluster, list every zookeeper node, separated by commas-->
<property>
	<name>hbase.zookeeper.quorum</name>
	<value>20.0.0.180:2181</value>
</property>
<property>
	<name>hbase.master</name>
	<value>hdfs://singlehenry:60000</value>
</property>
<property>
	<name>hbase.master.info.port</name>
	<value>60010</value>
</property>
<property>
	<name>hbase.unsafe.stream.capability.enforce</name>
	<value>false</value>
</property>

8.4 Start the service

start-hbase.sh
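
A basic smoke test once the HMaster and RegionServer are up (a sketch):

echo "status" | hbase shell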

9. Sqoop Installation and Configuration

Sqoop is no longer actively maintained and does not support HBase 2.x or later.

9.1 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract Sqoop into /opt/software
tar -zxvf sqoop-1.4.6.bin__hadoop-2.0.4-alpha.tar.gz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/sqoop-1.4.6.bin__hadoop-2.0.4-alpha /opt/software/sqoop146

9.2 Configure environment variables

vim /etc/profile.d/myenv.sh

#sqoop
export SQOOP_HOME=/opt/software/sqoop146
export PATH=$SQOOP_HOME/bin:$PATH

#Activate the environment variables
source /etc/profile

9.3 Copy dependencies

#Switch to Sqoop's lib directory
cd /opt/software/sqoop146/lib/
#Locate the MySQL JDBC jar under Hive's directory
find /opt/software/hive312/ -name 'mysql*.jar'
#Copy the MySQL JDBC jar into Sqoop's lib directory
cp /opt/software/hive312/lib/mysql-connector-java-5.1.47.jar ./
#Copy the required Hadoop jars into Sqoop's lib directory
cp /opt/software/hadoop313/share/hadoop/common/hadoop-common-3.1.3.jar ./
cp /opt/software/hadoop313/share/hadoop/hdfs/hadoop-hdfs-3.1.3.jar ./
cp /opt/software/hadoop313/share/hadoop/mapreduce/hadoop-mapreduce-client-core-3.1.3.jar ./

9.4 Edit the configuration file

cd /opt/software/sqoop146/conf
mv sqoop-env-template.sh sqoop-env.sh

vim sqoop-env.sh
-------------------------------------------------------
export HADOOP_COMMON_HOME=/opt/software/hadoop313
export HADOOP_MAPRED_HOME=/opt/software/hadoop313
export HBASE_HOME=/opt/software/hbase235
export HIVE_HOME=/opt/software/hive312
export ZOOCFGDIR=/opt/software/zookeeper357/conf
-------------------------------------------------------
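
To check that Sqoop, Hadoop, and the MySQL driver are wired up correctly (a sketch; the password is the one set in section 2.3):

sqoop list-databases --connect jdbc:mysql://localhost:3306 --username root --password 12345678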

10. Flume Installation and Configuration

10.1 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract Flume into /opt/software
tar -zxvf apache-flume-1.9.0-bin.tar.gz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/apache-flume-1.9.0-bin /opt/software/flume190

10.2 Configure environment variables

vim /etc/profile.d/myenv.sh

#flume
export FLUME_HOME=/opt/software/flume190
export PATH=$FLUME_HOME/bin:$PATH

#Activate the environment variables
source /etc/profile

10.3 Edit the configuration file

cd /opt/software/flume190/conf
mv flume-env.sh.template flume-env.sh

vim flume-env.sh

------------------------------------
export JAVA_HOME=/opt/software/jdk
------------------------------------

10.4 Copy dependency jars
Flume 1.9.0 ships guava 11.0.2, which is too old; delete it and copy the guava jar from Hadoop's lib directory.

#Switch to Flume's lib directory
cd /opt/software/flume190/lib
#Delete the old guava jar
rm -f guava-11.0.2.jar
#Locate the guava jar shipped with Hadoop
find /opt/software/hadoop313/ -name 'guava*.jar'
#Copy Hadoop's guava jar into Flume's lib directory
cp /opt/software/hadoop313/share/hadoop/common/lib/guava-27.0-jre.jar ./

#Copy the Hive HCatalog jars
cp /opt/software/hive312/hcatalog/share/hcatalog/*.jar ./
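
A minimal netcat-to-logger agent is a quick way to confirm the installation works (a sketch; the agent name a1 and the file name netcat-logger.conf are only examples):

cat > /opt/software/flume190/conf/netcat-logger.conf <<'EOF'
# one source, one memory channel, one logger sink
a1.sources = r1
a1.channels = c1
a1.sinks = k1
a1.sources.r1.type = netcat
a1.sources.r1.bind = localhost
a1.sources.r1.port = 44444
a1.channels.c1.type = memory
a1.sinks.k1.type = logger
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
EOF
flume-ng agent --conf /opt/software/flume190/conf --conf-file /opt/software/flume190/conf/netcat-logger.conf --name a1 -Dflume.root.logger=INFO,console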

11. Scala Installation and Configuration

11.1 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract Scala into /opt/software
tar -zxvf scala-2.12.10.tgz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/scala-2.12.10 /opt/software/scala212

11.2 Configure environment variables

vim /etc/profile.d/myenv.sh

#scala
export SCALA_HOME=/opt/software/scala212
export PATH=$SCALA_HOME/bin:$PATH

#Activate the environment variables
source /etc/profile

11.3 Launch Scala

scala
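
To confirm the installation without entering the REPL (a sketch; it should report version 2.12.10):

scala -version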

12. Spark Installation and Configuration

12.1 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract Spark into /opt/software
tar -zxvf spark-3.1.2-bin-hadoop3.2.tgz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/spark-3.1.2-bin-hadoop3.2 /opt/software/spark312

12.2 Configure environment variables

vim /etc/profile.d/myenv.sh

#spark
export SPARK_HOME=/opt/software/spark312
export PATH=$SPARK_HOME/bin:$SPARK_HOME/sbin:$PATH


#Activate the environment variables
source /etc/profile

12.3 Edit the configuration file

cd /opt/software/spark312/conf
mv spark-env.sh.template spark-env.sh

vim spark-env.sh

------------------------------------
# - SPARK_MASTER_HOST, to bind the master to a different IP address or hostname
export SPARK_MASTER_HOST=test	#this node's hostname (alias)
# - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports for the master
export SPARK_MASTER_PORT=7077
------------------------------------

12.4 Start the services

#Start the master first
start-master.sh
#Then start a worker
start-worker.sh spark://test:7077

12.5 Start a client

#Local test
spark-shell
#Launch on YARN
spark-shell --master yarn
#Launch against the standalone Spark master
spark-shell --master spark://test:7077
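
To submit a complete job rather than a shell, the bundled SparkPi example works as a smoke test (a sketch; the examples jar name should match the 3.1.2 distribution):

spark-submit --master spark://test:7077 --class org.apache.spark.examples.SparkPi $SPARK_HOME/examples/jars/spark-examples_2.12-3.1.2.jar 10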

13. Kafka Installation and Configuration

13.1 Extract and install

#1.Switch to the package directory
cd /opt/download
#2.Extract Kafka into /opt/software
tar -zxvf kafka_2.12-2.8.0.tgz -C /opt/software/
#3.Rename the directory to simplify the environment variables
mv /opt/software/kafka_2.12-2.8.0 /opt/software/kafka280scala212

13.2 Configure environment variables

vim /etc/profile.d/myenv.sh

#kafka
export KAFKA_HOME=/opt/software/kafka280scala212
export PATH=$KAFKA_HOME/bin:$PATH

#Activate the environment variables
source /etc/profile

13.3 Edit the configuration file

cd /opt/software/kafka280scala212/config/kraft


vim server.properties

------------------------------------
advertised.listeners=PLAINTEXT://192.168.131.200:9092
------------------------------------

13.4 Initialize Kafka (KRaft mode)

#Run from the Kafka home directory so the relative config path resolves
cd /opt/software/kafka280scala212
kafka-storage.sh random-uuid
------------------------------------
	gS3c0nE3TcG9LmLpBo0vIQ
------------------------------------
kafka-storage.sh format -t gS3c0nE3TcG9LmLpBo0vIQ -c config/kraft/server.properties
#In a multi-node cluster, run this on every node

13.5 Start Kafka

kafka-server-start.sh -daemon config/kraft/server.properties
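
A quick end-to-end check: create a topic, produce one message, and consume it (a sketch; the topic name "test" is only an example, and the address matches advertised.listeners above):

kafka-topics.sh --bootstrap-server 192.168.131.200:9092 --create --topic test --partitions 1 --replication-factor 1
echo hello | kafka-console-producer.sh --bootstrap-server 192.168.131.200:9092 --topic test
kafka-console-consumer.sh --bootstrap-server 192.168.131.200:9092 --topic test --from-beginning --max-messages 1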
更多推荐