diff --git a/CentOS 9 Stream(aarch64).md b/CentOS 9 Stream(aarch64).md
new file mode 100644
index 0000000..e082f09
--- /dev/null
+++ b/CentOS 9 Stream(aarch64).md
@@ -0,0 +1,182 @@
+CentOS 9 Stream
+
+yum配置文件路径
+
+```
+/etc/yum.repos.d/centos.repo
+```
+
+
+备份源配置
+
+```
+ mv /etc/yum.repos.d/centos.repo /etc/yum.repos.d/centos.repo.backup
+```
+
+阿里源配置
+
+```
+# CentOS-Base.repo
+#
+# The mirror system uses the connecting IP address of the client and the
+# update status of each mirror to pick mirrors that are updated to and
+# geographically close to the client. You should use this for CentOS updates
+# unless you are manually picking other mirrors.
+#
+# If the mirrorlist= does not work for you, as a fall back you can try the
+# remarked out baseurl= line instead.
+#
+#
+
+[base]
+name=CentOS-$releasever - Base - mirrors.aliyun.com
+#failovermethod=priority
+baseurl=https://mirrors.aliyun.com/centos-stream/$stream/BaseOS/$basearch/os/
+ http://mirrors.aliyuncs.com/centos-stream/$stream/BaseOS/$basearch/os/
+ http://mirrors.cloud.aliyuncs.com/centos-stream/$stream/BaseOS/$basearch/os/
+gpgcheck=1
+gpgkey=https://mirrors.aliyun.com/centos-stream/RPM-GPG-KEY-CentOS-Official
+
+#additional packages that may be useful
+#[extras]
+#name=CentOS-$releasever - Extras - mirrors.aliyun.com
+#failovermethod=priority
+#baseurl=https://mirrors.aliyun.com/centos-stream/$stream/extras/$basearch/os/
+# http://mirrors.aliyuncs.com/centos-stream/$stream/extras/$basearch/os/
+# http://mirrors.cloud.aliyuncs.com/centos-stream/$stream/extras/$basearch/os/
+#gpgcheck=1
+#gpgkey=https://mirrors.aliyun.com/centos-stream/RPM-GPG-KEY-CentOS-Official
+
+#additional packages that extend functionality of existing packages
+[centosplus]
+name=CentOS-$releasever - Plus - mirrors.aliyun.com
+#failovermethod=priority
+baseurl=https://mirrors.aliyun.com/centos-stream/$stream/centosplus/$basearch/os/
+ http://mirrors.aliyuncs.com/centos-stream/$stream/centosplus/$basearch/os/
+ http://mirrors.cloud.aliyuncs.com/centos-stream/$stream/centosplus/$basearch/os/
+gpgcheck=1
+enabled=0
+gpgkey=https://mirrors.aliyun.com/centos-stream/RPM-GPG-KEY-CentOS-Official
+
+[PowerTools]
+name=CentOS-$releasever - PowerTools - mirrors.aliyun.com
+#failovermethod=priority
+baseurl=https://mirrors.aliyun.com/centos-stream/$stream/PowerTools/$basearch/os/
+ http://mirrors.aliyuncs.com/centos-stream/$stream/PowerTools/$basearch/os/
+ http://mirrors.cloud.aliyuncs.com/centos-stream/$stream/PowerTools/$basearch/os/
+gpgcheck=1
+enabled=0
+gpgkey=https://mirrors.aliyun.com/centos-stream/RPM-GPG-KEY-CentOS-Official
+
+
+[AppStream]
+name=CentOS-$releasever - AppStream - mirrors.aliyun.com
+#failovermethod=priority
+baseurl=https://mirrors.aliyun.com/centos-stream/$stream/AppStream/$basearch/os/
+ http://mirrors.aliyuncs.com/centos-stream/$stream/AppStream/$basearch/os/
+ http://mirrors.cloud.aliyuncs.com/centos-stream/$stream/AppStream/$basearch/os/
+gpgcheck=1
+gpgkey=https://mirrors.aliyun.com/centos-stream/RPM-GPG-KEY-CentOS-Official
+```
+
+```
+[baseos]
+name=CentOS Stream $releasever - BaseOS
+baseurl=https://mirrors.ustc.edu.cn/centos-stream/$releasever-stream/BaseOS/$basearch/os
+# metalink=https://mirrors.centos.org/metalink?repo=centos-baseos-$stream&arch=$basearch&protocol=https,http
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+gpgcheck=1
+repo_gpgcheck=0
+metadata_expire=6h
+countme=1
+enabled=1
+
+[baseos-debuginfo]
+name=CentOS Stream $releasever - BaseOS - Debug
+baseurl=https://mirrors.ustc.edu.cn/centos-stream/$releasever-stream/BaseOS/$basearch/debug/tree/
+# metalink=https://mirrors.centos.org/metalink?repo=centos-baseos-debug-$stream&arch=$basearch&protocol=https,http
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+gpgcheck=1
+repo_gpgcheck=0
+metadata_expire=6h
+enabled=0
+
+[baseos-source]
+name=CentOS Stream $releasever - BaseOS - Source
+baseurl=https://mirrors.ustc.edu.cn/centos-stream/$releasever-stream/BaseOS/source/tree/
+# metalink=https://mirrors.centos.org/metalink?repo=centos-baseos-source-$stream&arch=source&protocol=https,http
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+gpgcheck=1
+repo_gpgcheck=0
+metadata_expire=6h
+enabled=0
+
+[appstream]
+name=CentOS Stream $releasever - AppStream
+baseurl=https://mirrors.ustc.edu.cn/centos-stream/$releasever-stream/AppStream/$basearch/os
+# metalink=https://mirrors.centos.org/metalink?repo=centos-appstream-$stream&arch=$basearch&protocol=https,http
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+gpgcheck=1
+repo_gpgcheck=0
+metadata_expire=6h
+countme=1
+enabled=1
+
+[appstream-debuginfo]
+name=CentOS Stream $releasever - AppStream - Debug
+baseurl=https://mirrors.ustc.edu.cn/centos-stream/$releasever-stream/AppStream/$basearch/debug/tree/
+# metalink=https://mirrors.centos.org/metalink?repo=centos-appstream-debug-$stream&arch=$basearch&protocol=https,http
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+gpgcheck=1
+repo_gpgcheck=0
+metadata_expire=6h
+enabled=0
+
+[appstream-source]
+name=CentOS Stream $releasever - AppStream - Source
+baseurl=https://mirrors.ustc.edu.cn/centos-stream/$releasever-stream/AppStream/source/tree/
+# metalink=https://mirrors.centos.org/metalink?repo=centos-appstream-source-$stream&arch=source&protocol=https,http
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+gpgcheck=1
+repo_gpgcheck=0
+metadata_expire=6h
+enabled=0
+
+[crb]
+name=CentOS Stream $releasever - CRB
+baseurl=https://mirrors.ustc.edu.cn/centos-stream/$releasever-stream/CRB/$basearch/os
+# metalink=https://mirrors.centos.org/metalink?repo=centos-crb-$stream&arch=$basearch&protocol=https,http
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+gpgcheck=1
+repo_gpgcheck=0
+metadata_expire=6h
+countme=1
+enabled=1
+
+[crb-debuginfo]
+name=CentOS Stream $releasever - CRB - Debug
+baseurl=https://mirrors.ustc.edu.cn/centos-stream/$releasever-stream/CRB/$basearch/debug/tree/
+# metalink=https://mirrors.centos.org/metalink?repo=centos-crb-debug-$stream&arch=$basearch&protocol=https,http
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+gpgcheck=1
+repo_gpgcheck=0
+metadata_expire=6h
+enabled=0
+
+[crb-source]
+name=CentOS Stream $releasever - CRB - Source
+baseurl=https://mirrors.ustc.edu.cn/centos-stream/$releasever-stream/CRB/source/tree/
+# metalink=https://mirrors.centos.org/metalink?repo=centos-crb-source-$stream&arch=source&protocol=https,http
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
+gpgcheck=1
+repo_gpgcheck=0
+metadata_expire=6h
+enabled=0
+```
+
+更新缓存
+
+```
+yum makecache && yum update
+```
+
diff --git a/ProtoBuf.md b/ProtoBuf.md
new file mode 100644
index 0000000..df1c146
--- /dev/null
+++ b/ProtoBuf.md
@@ -0,0 +1,321 @@
+# ProtoBuf简明教程
+
+## 1.什么是Protobuf
+
+Protobuf 是一个无关语言、无关平台的,用于序列化结构化数据的工具。相较于JSON体积更小,传输更快。
+Protobuf 定义在 .proto 文件中,在特定语言进行编译时,进行动态编译。
+
+1. 序列化:将数据结构转换为字节流,便于网络传输和存储。
+2. 高效性:相比于JSON,Protobuf 序列化后的字节流更小,传输更快。
+3. 兼容性:Protobuf 支持多种语言,使得数据在不同语言之间进行通信和交互更加方便。
+4. 可读性:Protobuf 的定义文件是纯文本
+
+## 2.主要应用场景
+
+1. 网络通信:Protobuf 适用于网络通信场景,例如在分布式系统中进行数据传输和通信。
+2. 数据存储:Protobuf 可以将数据结构存储在文件或数据库中,使得数据的存储和检索更加高效。
+3. 配置文件:Protobuf 可以用于存储和传输配置信息,例如应用程序的配置参数。
+
+## 3.Java中使用
+
+创建一个Maven项目其中使用 Java 作为 Client 端,Python 作为 Server 端
+
+```xml
+
+ com.google.protobuf
+ protobuf-java
+ 4.27.2
+
+```
+
+在项目中创建文件夹 `script` 在其下创建 Protobuf 文件 `video_info.proto`
+
+其中内容如下
+
+```protobuf
+syntax = "proto3";
+
+message VideoFeature {
+ optional int32 author_gender = 1 ;
+ optional int64 channel_id = 2;
+}
+```
+
+然后导入对应的 `protoc` 工程文件,[下载对应版本的文件](https://github.com/protocolbuffers/protobuf/releases/tag/v27.2)
+并解压到`script`目录下
+
+然后创建一个生成脚本 `build_pb.sh`内容如下
+
+```sh
+#!/bin/bash
+SRC_DIR="."
+JAVA_DST_DIR="../src/main/java"
+PYTHON_DST_DIR="../src/main/python"
+
+./protoc-27.2-osx-aarch_64/bin/protoc -I=$SRC_DIR --java_out=$JAVA_DST_DIR $SRC_DIR/AllTypes.proto
+./protoc-27.2-osx-aarch_64/bin/protoc -I=$SRC_DIR --python_out=$PYTHON_DST_DIR $SRC_DIR/AllTypes.proto
+```
+
+其中最后面的文件名是上面创建的`proto`文件的名称,运行sh脚本
+
+能够生成两个文件 `VideoInfo.java` `video_info_pb2.py`
+
+然后我们创建两个运行文件 `Client.java` `Server.py`,其中内容如下
+
+```java
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.net.Socket;
+
+/**
+ * ClassName: Client
+ * Package: com.yovinchen.protobuf
+ *
+ * @author yovinchen
+ * @since 2024/7/25 上午9:33
+ */
+
+public class Client {
+
+ public static byte[] msg;
+
+ static {
+ VideoInfo.VideoFeature feature = VideoInfo.VideoFeature.newBuilder()
+ .setAuthorGender(123)
+ .setChannelId(321)
+ .build();
+ msg = feature.toByteArray();
+
+// msg = "测试字符串".getBytes();
+// msg = "{\"author_gender\":123,\"channel_id\":321}".getBytes();
+ }
+
+ public static void main(String[] args) throws IOException {
+ System.out.println("客户端启动...");
+ // 创建一个流套接字并将其连接到指定主机上的指定端口号
+ Socket socket = new Socket("localhost", 8001);
+ // 向服务器端发送数据
+ DataOutputStream out = new DataOutputStream(socket.getOutputStream());
+ out.write(msg);
+ out.close();
+ socket.close();
+ }
+}
+```
+
+```python
+import socket
+
+import video_info_pb2
+
+
+def parse(buf):
+ try:
+ video_feature = video_info_pb2.VideoFeature()
+ video_feature.ParseFromString(buf)
+ return video_feature
+ except Exception:
+ return "暂时不支持转换"
+
+
+if __name__ == "__main__":
+ print("Server is starting")
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.bind(('localhost', 8001)) # 配置socket,绑定IP地址和端口号
+ sock.listen(5) # 设置最大允许连接数
+ while True: # 循环轮询socket状态,等待访问
+ connection, address = sock.accept()
+ buf = connection.recv(1024)
+ print(f"原始数据:{buf}")
+ print(f"数据长度:{len(buf)}")
+
+ print(parse(buf))
+ connection.close()
+
+```
+
+然后先运行 `Server.py` 然后再运行 `Client.java`,然后就能在Server看到原始数据以及解析出来的数据。
+
+解除`msg`中的注释测试`Json`进行数据传输的数据长度
+
+![image-20240729103528251](https://lsky.hhdxw.top/imghub/2024/07/image-202407291722220767.png)
+
+## 4.测试
+
+```protobuf
+syntax = "proto3";
+
+// 定义一个消息,该消息包含所有基本的数据类型。
+message AllTypes {
+ // 布尔类型
+ bool bool_field = 1; // 布尔值
+
+ // 字符串类型
+ string string_field = 2; // UTF-8 编码的字符串
+
+ // 字节流类型
+ bytes bytes_field = 3; // 原始字节流
+
+ // 整数类型
+ int32 int32_field = 4; // 32位有符号整数
+ int64 int64_field = 5; // 64位有符号整数
+ uint32 uint32_field = 6; // 32位无符号整数
+ uint64 uint64_field = 7; // 64位无符号整数
+ sint32 sint32_field = 8; // 32位有符号整数,使用 zigzag 编码
+ sint64 sint64_field = 9; // 64位有符号整数,使用 zigzag 编码
+
+ // 浮点数类型
+ float float_field = 14; // 单精度浮点数
+ double double_field = 15; // 双精度浮点数
+
+ // 固定宽度整数类型
+ fixed32 fixed32_field = 10; // 32位无符号整数,小端存储
+ fixed64 fixed64_field = 11; // 64位无符号整数,小端存储
+ sfixed32 sfixed32_field = 12; // 32位有符号整数,小端存储
+ sfixed64 sfixed64_field = 13; // 64位有符号整数,小端存储
+
+ // 重复字段类型
+ repeated int32 repeated_int32_field = 31; // 可以包含多个元素的 int32 字段
+
+ // 映射字段类型
+ map map_int32_string_field = 32; // 键为 int32,值为 string 的映射
+
+ // 枚举类型
+ EnumType enum_field = 33; // 枚举类型字段
+
+ // 嵌套消息类型
+ MessageType nested_message_field = 34; // 另一个消息类型的字段
+
+ // 嵌套的消息类型定义
+ message MessageType {
+ string nested_string_field = 1; // 嵌套消息中的字符串字段
+ }
+
+ // 枚举类型定义
+ enum EnumType {
+ ENUM_VALUE_0 = 0; // 枚举值 0
+ ENUM_VALUE_1 = 1; // 枚举值 1
+ ENUM_VALUE_2 = 2; // 枚举值 2
+ }
+}
+
+// 以下是用于包装基本类型的特殊消息类型,它们允许携带额外的元数据,如 null 值。
+message BoolValue {bool value = 1;} // 包装布尔值
+message StringValue {string value = 1;} // 包装字符串值
+message BytesValue {bytes value = 1;} // 包装字节流值
+message Int32Value {int32 value = 1;} // 包装 32 位整数值
+message Int64Value {int64 value = 1;} // 包装 64 位整数值
+message UInt32Value {uint32 value = 1;} // 包装无符号 32 位整数值
+message UInt64Value {uint64 value = 1;} // 包装无符号 64 位整数值
+message SInt32Value {sint32 value = 1;} // 包装 zigzag 编码的 32 位整数值
+message SInt64Value {sint64 value = 1;} // 包装 zigzag 编码的 64 位整数值
+message Fixed32Value {fixed32 value = 1;} // 包装小端存储的 32 位整数值
+message Fixed64Value {fixed64 value = 1;} // 包装小端存储的 64 位整数值
+message SFixed32Value {sfixed32 value = 1;} // 包装小端存储的 32 位有符号整数值
+message SFixed64Value {sfixed64 value = 1;} // 包装小端存储的 64 位有符号整数值
+message FloatValue {float value = 1;} // 包装单精度浮点数值
+message DoubleValue {double value = 1;} // 包装双精度浮点数值
+```
+
+服务端和接收端`AllTypesClient.java` `AllTypeServer.py`
+
+```java
+import com.google.protobuf.ByteString;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.net.Socket;
+
+/**
+ * ClassName: Client
+ * Package: com.yovinchen.protobuf
+ *
+ * @author yovinchen
+ * @since 2024/7/25 上午9:33
+ */
+public class AllTypesClient {
+
+ public static void main(String[] args) throws IOException {
+ System.out.println("客户端启动...");
+
+ // 创建一个 AllTypes 消息实例
+ AllTypesOuterClass.AllTypes.Builder builder = AllTypesOuterClass.AllTypes.newBuilder();
+ builder.setBoolField(true);
+ builder.setStringField("测试字符串");
+ builder.setBytesField(ByteString.copyFromUtf8("字节流"));
+ builder.setInt32Field(123);
+ builder.setInt64Field(123L);
+ builder.setUint32Field(456);
+ builder.setUint64Field(456L);
+ builder.setSint32Field(-123);
+ builder.setSint64Field(-123L);
+ builder.setFixed32Field(123);
+ builder.setFixed64Field(123L);
+ builder.setSfixed32Field(-123);
+ builder.setSfixed64Field(-123L);
+ builder.setFloatField(123.45f);
+ builder.setDoubleField(123.45);
+ builder.addRepeatedInt32Field(1);
+ builder.addRepeatedInt32Field(2);
+ builder.putMapInt32StringField(1, "value1");
+ builder.putMapInt32StringField(2, "value2");
+ builder.setEnumField(AllTypesOuterClass.AllTypes.EnumType.ENUM_VALUE_1);
+ builder.setNestedMessageField(AllTypesOuterClass.AllTypes.MessageType.newBuilder()
+ .setNestedStringField("嵌套字符串")
+ .build());
+
+ // 构建消息
+ AllTypesOuterClass.AllTypes allTypesMsg = builder.build();
+
+ // 创建一个流套接字并将其连接到指定主机上的指定端口号
+ Socket socket = new Socket("localhost", 8001);
+
+ // 向服务器端发送数据
+ DataOutputStream out = new DataOutputStream(socket.getOutputStream());
+ out.write(allTypesMsg.toByteArray());
+
+ out.close();
+ socket.close();
+ }
+}
+```
+
+```python
+import socket
+
+import AllTypes_pb2
+
+
+def parse(buf):
+ try:
+ all_types_msg = AllTypes_pb2.AllTypes() # 创建 AllTypes 消息实例
+ all_types_msg.ParseFromString(buf) # 从字节流中解析消息
+ return all_types_msg # 返回解析后的消息实例
+ except Exception as e:
+ print(f"Error parsing message: {e}")
+ return None # 如果解析失败,返回 None 或者自定义的错误信息
+
+
+if __name__ == "__main__":
+ print("Server is starting")
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.bind(('localhost', 8001))
+ sock.listen(5)
+
+ while True:
+ connection, address = sock.accept()
+ buf = connection.recv(1024)
+ print(f"原始数据: {buf}")
+ print(f"数据长度:{len(buf)}")
+
+ parsed_msg = parse(buf)
+ if parsed_msg is not None:
+ print(parsed_msg) # 输出解析后的消息
+ else:
+ print("无法解析消息")
+
+ connection.close()
+
+```
+
+![image-20240729103332905](https://lsky.hhdxw.top/imghub/2024/07/image-202407291722220780.png)
\ No newline at end of file
diff --git a/Redis 集群命令.md b/Redis 集群命令.md
index 1d2fd79..86f64f2 100644
--- a/Redis 集群命令.md
+++ b/Redis 集群命令.md
@@ -199,8 +199,8 @@ redis-cli --cluster reshard : --cluster-from --cluster-to `: 指定源节点。
-- `--cluster-to `: 指定目标节点。
+- `--cluster-from `: 指定源节点IP。
+- `--cluster-to `: 指定目标节点IP。
- `--cluster-slots `: 指定需要迁移的槽数。
- `--cluster-yes`: 自动确认操作。
- `--cluster-timeout `: 设置操作超时时间。
@@ -212,7 +212,7 @@ redis-cli --cluster reshard : --cluster-from --cluster-to
+#include
+#include
+using namespace std;
+int a[101],f[101][10001]={0};
+int main()
+{
+ int n,m;
+ cin>>n>>m;
+ for(int i=1;i<=n;++i)cin>>a[i];
+ for(int i=1;i<=n;++i)
+ for(int j=1;j<=m;++j)
+ {
+ if(j==a[i])f[i][j]=f[i-1][j]+1;
+ if(j>a[i]) f[i][j]=f[i-1][j]+f[i-1][j-a[i]];
+ if(j
+#include
+using namespace std;
+void help(int n, int m, vector& v, int beg) {
+ //if (beg>n) return;
+ if (m == 0) {
+ for (int i = 0; i> n >> m) {
+ vectorv;
+ help(n, m, v, 1);
+ }
+}
+```
+
+ (3)求从输入的三个数中最接近x 的然后输出对应字符
+ https://leetcode.cn/problems/3sum-closest/solutions/1891439/javati-jie-by-zejiang-wga8/
+
+
+
+
+1.实现多线程方法有哪些
+2.线程池核心参数和执行流程
+3.工具类实现线程池有哪些以及如何选择(不知道应用场景,扯了扯美团动态线程池)
+4.java里面锁有哪些
+5.synchronized 锁升级、锁粗化、锁消除(就答了锁升级)
+6.markword除了对象头还有哪些
+7.jvm内存结构、对象判断存活算法、垃圾回收算法
+8.类加载过程、双亲委派机制、如何打破(打破忘记了,举了热部署插件可能用到)
+9.JMM内存模型、cas原理、volatile关键字
+10.mysql索引有哪些、b树和b加树区别、索引失效情况、索引下推、回表查询
+11.范围查询索引会失效吗?explain 中关注哪些字段、type字段怎么判断是不是最优
+12.事务特性、隔离级别、事务产生问题
+13.redis 数据结构有哪些项目里面如何使用
+14.大key问题如何解决、aof和rdb区别、项目中用到哪种redis集群模式?都有什么区别?
+15.缓存击穿、穿透、雪崩怎么解决、说一说布隆过滤器
+16.项目里面使用到的一级缓存、二级缓存双写一致性(这个问题和我讨论了10分钟)
+17. 项目里面使用到的aop+注解+redis实现的滑动窗口限流怎么做的。
+18.手撕二路归并算法(直接秒了)
+19.反问
+
+有个场景题是问导入10万数据入Excel如何优化,还有一个是数据同步时如何保证最终一致性
+然后一直拷打项目
+最后问了选择题那个byte值赋128会变成什么,然后问赋129会变成什么
+讲一下tcp/ip协议
+项目中用到了ConcurrentHashMap,讲一下
+讲一下丢包
+讲一下mysql索引结构
+一面 4月18
+50分钟
+为什么想来长沙?
+了解操作系统吗,内核态和用户态
+还有一个操作系统的忘记了
+TCP和UDP的区别
+TCP怎么实现可靠的
+讲一下数据结构,数组,链表,hashmap
+Hashmap扩容,为什么要2的指数这样扩容
+只是因为取余用位运算提高效率吗
+介绍下java中的乐观锁和悲观锁
+Volatile关键字,可见性是怎么实现的
+CAS介绍一下?
+在什么地方用的?ConcurrentHashMap
+什么是幂等,幂等怎么实现的,
+Spring了解吗,ioc和aop,循环依赖怎么解决的
+Mysql事务了解吗
+怎么实现原子性的,
+Mysql三大日志
+MyISAM和InnoDB 的区别
+怎么优化sql查询
+MVCC
+JMM了解吗?
+JVM了解多少
+OOM在什么情况下会出现,有了解吗?
+怎么解决的?尽量避免OOM
+Redis了解吗?介绍一下
+Redis持久化
+Redis怎么保证原子性
+了解IO多路复用吗
+Lua脚本
+AOF你有了解过吗?AOF日志重写知道吗
+RocketMQ怎么保证消息不丢失的
+如何保证不重复消费
+
+场景题:
+1.QPS 10000,怎么让接口顶住压力,已经有一万,不能通过前端控制,也不能限流
+
+2.抽奖活动怎么保证,数据库不宕机
+
+建议,多看源码,看看书
+
+二面 4月23
+50分钟
+自我介绍
+拷打实习
+拷打项目
+各种数据结构
+设计模式
+
+场景
+怎么确定一个项目的,性能瓶颈出现在哪里
+怎么去优化
+怎么提高项目的性能瓶颈
+从代码层面,架构设计层面等分析
+高并发下的解决方案
+反问
+🕒岗位/面试时间
+长沙 Java后端
+
+作者:想逆袭好楠
+链接:[https://www.nowcoder.com/feed/main/detail/cad5079c35ba4006936a7deec6780cd9?sourceSSR=dynamic](https://www.nowcoder.com/feed/main/detail/cad5079c35ba4006936a7deec6780cd9?sourceSSR=dynamic)
+来源:牛客网
\ No newline at end of file
diff --git a/环境/CentOS 国内镜像源 x86_64.md b/环境/CentOS 国内镜像源 x86_64.md
index 168b4d4..e90d205 100644
--- a/环境/CentOS 国内镜像源 x86_64.md
+++ b/环境/CentOS 国内镜像源 x86_64.md
@@ -64,7 +64,7 @@ CentOS Stream 9 x86_64
cd /etc/yum.repos.d/
//创建新文件夹并将源文件备份为repo.bak
-mkdir backup && mv *repo backup/
+mkdir backup && mv *repo backup/
//下载国内yum源文件
sed -i 's|metalink|#metalink|g' /etc/yum.repos.d/*.repo
diff --git a/环境/安装教程/Centos 安装docker.md b/环境/安装教程/Centos 安装docker.md
index 828b54b..e76c536 100644
--- a/环境/安装教程/Centos 安装docker.md
+++ b/环境/安装教程/Centos 安装docker.md
@@ -75,6 +75,22 @@ sudo vim /etc/docker/daemon.json
}
```
+```shell
+sudo mkdir -p /etc/docker
+sudo tee /etc/docker/daemon.json <<-'EOF'
+{
+ "registry-mirrors": [
+ "https://docker.m.daocloud.io",
+ "https://dockerproxy.com",
+ "https://docker.mirrors.ustc.edu.cn",
+ "https://docker.nju.edu.cn"
+ ]
+}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+```
+
上传地址设置
```json
diff --git a/环境/安装教程/Docker环境部署.md b/环境/安装教程/Docker环境部署.md
new file mode 100644
index 0000000..fb4bba1
--- /dev/null
+++ b/环境/安装教程/Docker环境部署.md
@@ -0,0 +1,1325 @@
+## 关系型数据库
+
+------
+
+### MySQL
+
+```shell
+docker pull bitnami/mysql:latest
+
+docker run -itd \
+ --name mysql-server \
+ -p 3306:3306 \
+ -e ALLOW_EMPTY_PASSWORD=yes \
+ -e MYSQL_ROOT_PASSWORD=root \
+ bitnami/mysql:latest
+```
+
+### MariaDB
+
+```shell
+docker pull bitnami/mariadb:latest
+
+docker run -itd \
+ --name mariadb-test \
+ -p 3306:3306 \
+ -e ALLOW_EMPTY_PASSWORD=yes \
+ -e MARIADB_ROOT_PASSWORD=root \
+ bitnami/mariadb:latest
+```
+
+### PostgreSQL
+
+```
+docker pull bitnami/postgresql:latest
+docker pull bitnami/postgresql-repmgr:latest
+docker pull bitnami/pgbouncer:latest
+docker pull bitnami/pgpool:latest
+docker pull bitnami/postgres-exporter:latest
+
+docker run -itd \
+ --name postgres-test \
+ -p 5432:5432 \
+ -e POSTGRES_PASSWORD=root \
+ bitnami/postgresql:latest
+
+docker exec -it postgres-test bash -c "apt update"
+```
+
+### Citus
+
+Citus的策略是把分区(Partition)数据分布到各个工作节点。
+
+#### 单机部署
+
+单机部署,相当于整个集群只有一个协调器,而没有旁的工作节点:
+
+```
+docker run -itd \
+ --name citus-standalone \
+ -p 5432:5432 \
+ -e POSTGRES_PASSWORD=123456 \
+ citusdata/citus:latest
+```
+
+#### 最小集群部署
+
+所有操作都是通过协调器节点进行操作,所以,对外只暴露协调器的端口,其他的工作节点却并不暴露,他们在内部通过docker网络进行通讯:
+
+```
+# 添加网络
+docker network create citus-network
+
+# 添加协调器节点
+docker run --name citus-coordinator1 \
+ --network=citus-network \
+ -p 5432:5432 \
+ -e POSTGRES_PASSWORD=123456 \
+ -d citusdata/citus:latest
+
+# 添加工作节点1
+docker run --name citus-work1 \
+ --network=citus-network \
+ -e POSTGRES_PASSWORD=123456 \
+ -d citusdata/citus:latest
+
+# 添加工作节点2
+docker run --name citus-work2 \
+ --network=citus-network \
+ -e POSTGRES_PASSWORD=123456 \
+ -d citusdata/citus:latest
+```
+
+在协调器节点进行SQL操作:
+
+```
+-- 设置协调器节点信息
+SELECT citus_set_coordinator_host('citus-coordinator1', 5432);
+
+-- 添加工作节点
+SELECT * from citus_add_node('citus-work1', 5432);
+SELECT * from citus_add_node('citus-work2', 5432);
+
+-- 列表查看worker节点
+SELECT * FROM citus_get_active_worker_nodes();
+```
+
+### Greenplum
+
+```
+docker run -itd \
+ --name greenplum-standalone \
+ -p 5432:5432 \
+ projectairws/greenplum:latest
+```
+
+### SQLServer
+
+```
+docker pull mcr.microsoft.com/mssql/server:2019-latest
+
+docker run -itd \
+ --name MSSQL_1433 \
+ -m 512m \
+ -e "ACCEPT_EULA=Y" \
+ -e "SA_PASSWORD=Abcd123456789*" \
+ -p 1433:1433 \
+ mcr.microsoft.com/mssql/server:2019-latest
+```
+
+### TiDB
+
+```
+docker pull pingcap/tidb:latest
+docker pull pingcap/tikv:latest
+docker pull pingcap/pd:latest
+
+docker run -itd \
+ --name tidb-test \
+ -v /data/tidb/data:/tmp/tidb \
+ --privileged=true \
+ -p 4000:4000 \
+ -p 10080:10080 \
+ pingcap/tidb:latest
+```
+
+## 图数据库
+
+------
+
+### Neo4J
+
+```
+docker pull bitnami/neo4j:latest
+
+docker run -itd \
+ --name neo4j-test \
+ -p 7473:7473 \
+ -p 7687:7687 \
+ -p 7474:7474 \
+ -e NEO4J_PASSWORD=bitnami \
+ bitnami/neo4j:latest
+```
+
+## 时序型数据库
+
+------
+
+### InfluxDB
+
+```
+docker pull bitnami/influxdb:latest
+
+docker run -itd \
+ --name influxdb-test \
+ -p 8083:8083 \
+ -p 8086:8086 \
+ -e INFLUXDB_HTTP_AUTH_ENABLED=true \
+ -e INFLUXDB_ADMIN_USER=admin \
+ -e INFLUXDB_ADMIN_USER_PASSWORD=123456789 \
+ -e INFLUXDB_ADMIN_USER_TOKEN=admintoken123 \
+ -e INFLUXDB_DB=my_database \
+ bitnami/influxdb:latest
+create user "admin" with password '123456789' with all privileges
+```
+
+- 管理后台: http://localhost:8086/
+
+### TimescaleDB
+
+```
+docker pull timescale/timescaledb:latest-pg14
+docker pull timescale/timescaledb:latest-pg15
+docker pull timescale/timescaledb-postgis:latest-pg13
+docker pull timescale/pg_prometheus:latest-pg11
+
+docker run -itd \
+ --name timescale-test \
+ -p 5432:5432 \
+ -e POSTGRES_PASSWORD=123456 \
+ timescale/timescaledb:latest-pg15
+```
+
+- 默认账号:postgres
+- 默认密码:123456
+
+### OpenTSDB
+
+```
+docker pull petergrace/opentsdb-docker:latest
+
+docker run -itd \
+ --name opentsdb-test \
+ -p 4242:4242 \
+ petergrace/opentsdb-docker:latest
+```
+
+- 管理后台 [http://localhost:4242](http://localhost:4242/)
+
+### QuestDB
+
+```
+docker pull questdb/questdb:latest
+
+docker run -itd \
+ --name questdb-test \
+ -p 9000:9000 \
+ -p 8812:8812 \
+ -p 9009:9009 \
+ questdb/questdb:latest
+```
+
+### TDengine
+
+```
+docker pull tdengine/tdengine:latest
+
+docker run -itd \
+ --name tdengine-test \
+ -p 6030-6041:6030-6041 \
+ -p 6030-6041:6030-6041/udp \
+ tdengine/tdengine:latest
+```
+
+### ElasticSearch
+
+```
+docker pull bitnami/elasticsearch:latest
+
+docker run -itd \
+ --name elasticsearch \
+ -p 9200:9200 \
+ -p 9300:9300 \
+ -e ELASTICSEARCH_USERNAME=elastic \
+ -e ELASTICSEARCH_PASSWORD=elastic \
+ -e xpack.security.enabled=true \
+ -e discovery.type=single-node \
+ -e http.cors.enabled=true \
+ -e http.cors.allow-origin=http://localhost:13580,http://127.0.0.1:13580 \
+ -e http.cors.allow-headers=X-Requested-With,X-Auth-Token,Content-Type,Content-Length,Authorization \
+ -e http.cors.allow-credentials=true \
+ bitnami/elasticsearch:latest
+
+docker pull appbaseio/dejavu:latest
+
+docker run -itd \
+ --name dejavu-test \
+ -p 13580:1358 \
+ appbaseio/dejavu:latest
+
+http://localhost:13580/
+```
+
+### Clickhouse
+
+```
+docker pull yandex/clickhouse-server:latest
+docker pull clickhouse/clickhouse-server:latest
+
+# 8123为http接口 9000为tcp接口 9004为mysql接口
+# 推荐使用DBeaver作为客户端
+docker run -itd \
+ --name clickhouse-server \
+ -p 8123:8123 \
+ -p 9000:9000 \
+ -p 9004:9004 \
+ --network=app-tier \
+ --ulimit \
+ nofile=262144:262144 \
+ clickhouse/clickhouse-server:latest
+```
+
+- 默认账号: default
+- 密码:无
+
+### Doris
+
+首先要配置Java虚拟机,需在宿主机执行如下命令:
+
+```
+sudo sysctl -w vm.max_map_count=2000000
+```
+
+接着创建一个子网网桥:
+
+```
+docker network create --driver bridge --subnet=172.20.80.0/24 doris-network
+docker pull apache/doris:1.2.2-be-x86_64
+docker pull apache/doris:1.2.2-fe-x86_64
+
+docker run -itd \
+ --name=doris-fe \
+ --env FE_SERVERS="fe1:172.20.80.2:9010" \
+ --env FE_ID=1 \
+ -p 8030:8030 \
+ -p 9030:9030 \
+ -v /data/fe/doris-meta:/opt/apache-doris/fe/doris-meta \
+ -v /data/fe/conf:/opt/apache-doris/fe/conf \
+ -v /data/fe/log:/opt/apache-doris/fe/log \
+ --network=doris-network \
+ --ip=172.20.80.2 \
+ apache/doris:1.2.2-fe-x86_64
+
+docker run -itd \
+ --name=doris-be \
+ --env FE_SERVERS="fe1:172.20.80.2:9010" \
+ --env BE_ADDR="172.20.80.3:9050" \
+ -p 8040:8040 \
+ -v /data/be/storage:/opt/apache-doris/be/storage \
+ -v /data/be/conf:/opt/apache-doris/be/conf \
+ -v /data/be/log:/opt/apache-doris/be/log \
+ --network=doris-network \
+ --ip=172.20.80.3 \
+ apache/doris:1.2.2-be-x86_64
+```
+
+## NoSQL数据库
+
+------
+
+### MongoDB
+
+下载镜像:
+
+```
+docker pull bitnami/mongodb:latest
+docker pull bitnami/mongodb-exporter:latest
+```
+
+带密码安装:
+
+```
+docker run -itd \
+ --name mongodb-test \
+ -p 27017:27017 \
+ -e MONGODB_ROOT_USER=root \
+ -e MONGODB_ROOT_PASSWORD=123456 \
+ -e MONGODB_USERNAME=test \
+ -e MONGODB_PASSWORD=123456 \
+ -e MONGODB_DATABASE=test \
+ bitnami/mongodb:latest
+```
+
+不带密码安装:
+
+```
+docker run -itd \
+ --name mongodb-test \
+ -p 27017:27017 \
+ -e ALLOW_EMPTY_PASSWORD=yes \
+ bitnami/mongodb:latest
+```
+
+有两点需要注意:
+
+1. 如果需要映射数据卷,需要把本地路径的所有权改到1001:`sudo chown -R 1001:1001 data/db`,否则会报错:`‘mkdir: cannot create directory ‘/bitnami/mongodb’: Permission denied’`;
+2. MongoDB 5.0开始有些机器运行会报错:`Illegal instruction`,这是因为机器硬件不支持 AVX 指令集 的缘故,没办法,MongoDB降级吧。
+
+### Redis
+
+```
+docker pull bitnami/redis:latest
+docker pull bitnami/redis-exporter:latest
+
+docker run -itd \
+ --name redis-server \
+ -p 6379:6379 \
+ -e ALLOW_EMPTY_PASSWORD=yes \
+ bitnami/redis:latest
+```
+
+### Memcached
+
+```
+docker pull bitnami/memcached:latest
+docker pull bitnami/memcached-exporter:latest
+
+docker run -itd \
+ --name memcached-test \
+ -p 11211:11211 \
+ bitnami/memcached:latest
+```
+
+### CouchDB
+
+```
+docker pull bitnami/couchdb:latest
+
+docker run -itd \
+ --name couchdb-test \
+ -p 5984:5984 \
+ -p 9100:9100 \
+  -e COUCHDB_PORT_NUMBER=5984 \
+  -e COUCHDB_CLUSTER_PORT_NUMBER=9100 \
+  -e COUCHDB_USER=admin \
+  -e COUCHDB_PASSWORD=couchdb \
+ bitnami/couchdb:latest
+```
+
+### Cassandra
+
+```
+docker pull bitnami/cassandra:latest
+docker pull bitnami/cassandra-exporter:latest
+
+docker run -itd \
+ --name cassandra-test \
+ -p 7000:7000 \
+ -p 9042:9042 \
+ -e CASSANDRA_USER=cassandra \
+ -e CASSANDRA_PASSWORD=cassandra \
+ bitnami/cassandra:latest
+```
+
+## 服务发现注册
+
+------
+
+### etcd
+
+```
+docker pull bitnami/etcd:latest
+
+docker run -itd \
+ --name etcd-standalone \
+ -p 2379:2379 \
+ -p 2380:2380 \
+ -e ETCDCTL_API=3 \
+ -e ALLOW_NONE_AUTHENTICATION=yes \
+ -e ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379 \
+ bitnami/etcd:latest
+```
+
+- 管理工具: [etcd-manager](https://www.electronjs.org/apps/etcd-manager)
+
+### Nacos
+
+```
+docker pull nacos/nacos-server:latest
+
+docker run -itd \
+ --name nacos-standalone \
+ -e MODE=standalone \
+ -p 8849:8848 \
+ nacos/nacos-server:latest
+```
+
+- 管理后台: http://localhost:8849/nacos/index.html
+
+### Consul
+
+```
+docker pull bitnami/consul:latest
+docker pull bitnami/consul-exporter:latest
+
+docker run -itd \
+ --name consul-server-standalone \
+ -p 8300:8300 \
+ -p 8500:8500 \
+ -p 8600:8600/udp \
+ -e CONSUL_BIND_INTERFACE='eth0' \
+ -e CONSUL_AGENT_MODE=server \
+ -e CONSUL_ENABLE_UI=true \
+ -e CONSUL_BOOTSTRAP_EXPECT=1 \
+ -e CONSUL_CLIENT_LAN_ADDRESS=0.0.0.0 \
+ bitnami/consul:latest
+```
+
+- 管理后台: [http://localhost:8500](http://localhost:8500/)
+
+### Apollo
+
+注意,先要导入SQL数据!
+
+```
+docker pull apolloconfig/apollo-portal:latest
+docker pull apolloconfig/apollo-configservice:latest
+docker pull apolloconfig/apollo-adminservice:latest
+
+#
+ docker run -itd \
+ --name apollo-configservice \
+ -p 8080:8080 \
+ -e SPRING_DATASOURCE_URL="jdbc:mysql://127.0.0.1:3306/ApolloConfigDB?characterEncoding=utf8" \
+ -e SPRING_DATASOURCE_USERNAME=root \
+ -e SPRING_DATASOURCE_PASSWORD=123456 \
+ -v /tmp/logs:/opt/logs \
+ apolloconfig/apollo-configservice:latest
+
+docker run -itd \
+ --name apollo-adminservice \
+ -p 8090:8090 \
+ -e SPRING_DATASOURCE_URL="jdbc:mysql://127.0.0.1:3306/ApolloConfigDB?characterEncoding=utf8" \
+ -e SPRING_DATASOURCE_USERNAME=root \
+ -e SPRING_DATASOURCE_PASSWORD=123456 \
+ -v /tmp/logs:/opt/logs \
+ apolloconfig/apollo-adminservice:latest
+
+docker run -itd \
+ --name apollo-portal \
+ -p 8070:8070 \
+ -e SPRING_DATASOURCE_URL="jdbc:mysql://127.0.0.1:3306/ApolloPortalDB?characterEncoding=utf8" \
+ -e SPRING_DATASOURCE_USERNAME=root \
+ -e SPRING_DATASOURCE_PASSWORD=123456 \
+ -e APOLLO_PORTAL_ENVS=dev \
+ -e DEV_META=http://127.0.0.1:8080 \
+ -v /tmp/logs:/opt/logs \
+ apolloconfig/apollo-portal:latest
+```
+
+- Eureka管理后台:
+- Apollo管理后台:
+ 账号密码: apollo / admin
+
+## 消息队列
+
+------
+
+### RabbitMQ
+
+```
+docker pull bitnami/rabbitmq:latest
+
+docker run -itd \
+ --hostname localhost \
+ --name rabbitmq-test \
+ -p 15672:15672 \
+ -p 5672:5672 \
+ -p 1883:1883 \
+ -p 15675:15675 \
+ -e RABBITMQ_PLUGINS=rabbitmq_top,rabbitmq_mqtt,rabbitmq_web_mqtt,rabbitmq_prometheus,rabbitmq_stomp,rabbitmq_auth_backend_http \
+ bitnami/rabbitmq:latest
+
+# 查看插件列表
+rabbitmq-plugins list
+# rabbitmq_peer_discovery_consul
+rabbitmq-plugins --offline enable rabbitmq_peer_discovery_consul
+# rabbitmq_mqtt 提供与后端服务交互使用,端口1883
+rabbitmq-plugins enable rabbitmq_mqtt
+# rabbitmq_web_mqtt 提供与前端交互使用,端口15675
+rabbitmq-plugins enable rabbitmq_web_mqtt
+#
+rabbitmq-plugins enable rabbitmq_auth_backend_http
+```
+
+- 管理后台: [http://localhost:15672](http://localhost:15672/)
+- 默认账号: user
+- 默认密码: bitnami
+
+### Kafka
+
+#### With ZooKeeper
+
+```
+docker pull bitnami/zookeeper:latest
+docker pull bitnami/kafka:latest
+docker pull bitnami/kafka-exporter:latest
+
+docker run -itd \
+ --name zookeeper-server \
+ --network app-tier \
+ -p 2181:2181 \
+ -e ALLOW_ANONYMOUS_LOGIN=yes \
+ bitnami/zookeeper:latest
+
+docker run -itd \
+ --name kafka-standalone \
+ --link zookeeper-server \
+ --network app-tier \
+ -p 9092:9092 \
+ -v /home/data/kafka:/bitnami/kafka \
+ -e KAFKA_ENABLE_KRAFT=no \
+ -e KAFKA_BROKER_ID=1 \
+ -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
+ -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://host.docker.internal:9092 \
+ -e KAFKA_ZOOKEEPER_CONNECT=zookeeper-server:2181 \
+ -e ALLOW_PLAINTEXT_LISTENER=yes \
+ --user root \
+ bitnami/kafka:latest
+```
+
+#### With KRaft
+
+```
+docker pull bitnami/kafka:latest
+
+docker run -itd \
+ --name kafka-standalone \
+ --user root \
+ -p 9092:9092 \
+ -p 9093:9093 \
+ -v /home/data/kafka:/bitnami/kafka \
+ -e KAFKA_ENABLE_KRAFT=yes \
+ -e KAFKA_CFG_NODE_ID=1 \
+ -e KAFKA_CFG_PROCESS_ROLES=broker,controller \
+ -e KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER \
+ -e KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@127.0.0.1:9093 \
+ -e KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT \
+ -e KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093 \
+ -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 \
+ -e ALLOW_PLAINTEXT_LISTENER=yes \
+ bitnami/kafka:latest
+```
+
+#### 管理工具
+
+- [Offset Explorer](https://www.kafkatool.com/download.html)
+
+### NSQ
+
+```
+docker pull nsqio/nsq:latest
+
+# nsqlookupd
+docker run -d \
+ --name nsqlookupd \
+ -p 4160:4160 \
+ -p 4161:4161 \
+ nsqio/nsq:latest \
+ /nsqlookupd
+
+# nsqd
+docker run -itd \
+ --name nsqd \
+ -p 4150:4150 \
+ -p 4151:4151 \
+ --link nsqlookupd \
+ nsqio/nsq:latest \
+ /nsqd --lookupd-tcp-address=nsqlookupd:4160
+
+#nsqadmin
+docker run -itd \
+ --name nsqadmin \
+ -p 4171:4171 \
+ --link nsqlookupd \
+ nsqio/nsq:latest \
+ /nsqadmin --lookupd-http-address=nsqlookupd:4161
+```
+
+- 控制台访问地址: [http://127.0.0.1:4171](http://127.0.0.1:4171/)
+- 直接使用REST API查看节点信息: http://127.0.0.1:4161/nodes
+
+### NATS
+
+```
+docker pull bitnami/nats:latest
+docker pull bitnami/nats-exporter:latest
+
+docker run -itd \
+ --name nats-server \
+ -p 4222:4222 \
+ -p 6222:6222 \
+ -p 8000:8222 \
+ -e NATS_HTTP_PORT_NUMBER=8222 \
+ bitnami/nats:latest
+```
+
+- 管理后台: [http://127.0.0.1:8000](http://127.0.0.1:8000/)
+
+### Mosquitto
+
+```
+docker pull eclipse-mosquitto:latest
+
+# 1883 tcp
+# 9001 websockets
+docker run -itd \
+ --name mosquitto-test \
+ -p 1883:1883 \
+ -p 9001:9001 \
+ eclipse-mosquitto:latest
+```
+
+### EMQX
+
+```
+docker pull emqx/emqx:latest
+
+docker run -itd \
+ --name emqx-test \
+ --add-host=host.docker.internal:host-gateway \
+ -p 18083:18083 \
+ -p 8883:8883 \
+ -p 1883:1883 \
+ -p 8083:8083 \
+ -p 8084:8084 \
+ emqx/emqx:latest
+```
+
+端口说明:
+
+- 18083 - 管理后台
+- 8883 - SSL
+- 1883 - TCP
+- 8083 - WS
+- 8084 - WSS
+- 管理后台: [http://localhost:18083](http://localhost:18083/)
+- 默认账号: admin
+- 默认密码: public
+
+### Pulsar
+
+```
+docker pull apachepulsar/pulsar-manager:latest
+docker pull apachepulsar/pulsar:latest
+
+docker run -itd \
+ -p 6650:6650 \
+ -p 8080:8080 \
+ --name pulsar-standalone \
+ apachepulsar/pulsar:latest bin/pulsar standalone
+
+docker run -itd \
+ -p 9527:9527 \
+ -p 7750:7750 \
+ -e SPRING_CONFIGURATION_FILE=/pulsar-manager/pulsar-manager/application.properties \
+ apachepulsar/pulsar-manager:latest
+docker pull apachepulsar/pulsar-standalone:latest
+
+docker run -itd \
+ -p 6650:6650 \
+ -p 8080:8080 \
+ -p 9527:9527 \
+ --name pulsar-standalone \
+ apachepulsar/pulsar:latest bin/pulsar standalone
+docker pull apachepulsar/pulsar-all:latest
+```
+
+- 管理后台 [http://localhost:9527](http://localhost:9527/)
+
+### HiveMQ
+
+```
+docker pull hivemq/hivemq4:latest
+
+docker run -itd \
+ --name hivemq-test \
+ --ulimit nofile=500000:500000 \
+ -p 8080:8080 \
+ -p 8000:8000 \
+ -p 1883:1883 \
+ hivemq/hivemq4:latest
+```
+
+### RocketMQ
+
+#### RocketMQ4.x
+
+至少启动一个NameServer,一个Broker。
+
+```
+docker pull apache/rocketmq:4.9.2
+
+# NameServer
+docker run -d \
+ --name rocketmq-namesrv \
+ -e "JAVA_OPT_EXT=-server -Xms512M -Xmx512M -Xmn128m" \
+ -p 9876:9876 \
+ apache/rocketmq:4.9.2 \
+ sh mqnamesrv
+
+# Broker
+docker run -d \
+ --name rocketmq-broker \
+ -p 10911:10911 \
+ -p 10909:10909 \
+ -p 10912:10912 \
+ --link rocketmq-namesrv \
+ -e "JAVA_OPT_EXT=-server -Xms512M -Xmx512M -Xmn128m" \
+ -e "NAMESRV_ADDR=rocketmq-namesrv:9876" \
+ apache/rocketmq:4.9.2 \
+ sh mqbroker -c /home/rocketmq/rocketmq-4.9.2/conf/broker.conf
+```
+
+以及Web控制台:
+
+```
+docker pull styletang/rocketmq-console-ng:latest
+
+docker run -d \
+ --name rocketmq-console \
+ -p 9800:8080 \
+ --link rocketmq-namesrv \
+ -e "JAVA_OPT_EXT=-server -Xms512M -Xmx512M -Xmn128m" \
+ -e "JAVA_OPTS=-Xmx256M -Xms256M -Xmn128M -Drocketmq.namesrv.addr=rocketmq-namesrv:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false" \
+ -t styletang/rocketmq-console-ng:latest
+```
+
+RocketMQ Console 是 rocketmq 的第三方扩展组件,提供图形界面便于管理和监控rocketmq。
+
+- 控制台访问地址: http://localhost:9800/#/
+
+需要注意的是,NameServer下发的是Docker容器的内网IP地址,从宿主机的外网访问是访问不了的,需要进行配置:
+
+```
+vi /home/rocketmq/rocketmq-4.9.2/conf/broker.conf
+```
+
+添加如下配置,brokerIP1可以是ip也可以是dns,hostname:
+
+```
+brokerIP1 = host.docker.internal
+```
+
+#### RocketMQ5.x
+
+至少启动一个NameServer,一个Broker。
+
+5.x版本下,官方建议使用Local模式部署,即Broker和Proxy同进程部署。
+
+```
+docker pull apache/rocketmq:5.1.4
+
+# NameServer
+docker run -d \
+ --name rocketmq-namesrv \
+ -e "MAX_HEAP_SIZE=256M" \
+ -e "HEAP_NEWSIZE=128M" \
+ -p 9876:9876 \
+ apache/rocketmq:5.1.4 \
+ sh mqnamesrv
+
+# Broker
+docker run -d \
+ --name rocketmq-broker \
+ --link rocketmq-namesrv \
+ -p 10911:10911 \
+ -p 10909:10909 \
+ -p 10912:10912 \
+ -p 8080:8080 \
+ -p 8081:8081 \
+ -e "MAX_HEAP_SIZE=256M" \
+ -e "HEAP_NEWSIZE=128M" \
+ -e "JAVA_OPTS=-server -Xmx256M -Xms256M -Xmn128M" \
+ -e "NAMESRV_ADDR=rocketmq-namesrv:9876" \
+ apache/rocketmq:5.1.4 \
+ sh mqbroker --enable-proxy autoCreateTopicEnable=true autoCreateSubscriptionGroup=true \
+ -c /home/rocketmq/rocketmq-5.1.4/conf/broker.conf
+```
+
+以及Web控制台:
+
+```
+docker run -d \
+ --restart=always \
+ --name rocketmq-dashboard \
+ --link rocketmq-namesrv \
+ -e "JAVA_OPTS=-Xmx256M -Xms256M -Xmn128M -Drocketmq.namesrv.addr=rocketmq-namesrv:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false" \
+ -p 9800:8080 \
+ apacherocketmq/rocketmq-dashboard
+```
+
+- 控制台访问地址: http://localhost:9800/#/
+
+### ActiveMQ
+
+```
+docker pull rmohr/activemq:latest
+
+docker run -d \
+ --name activemq-test \
+ -p 61616:61616 \
+ -p 8161:8161 \
+ -p 61613:61613 \
+ -p 1883:1883 \
+ -p 61614:61614 \
+ rmohr/activemq:latest
+```
+
+| 端口号 | 协议 |
+| :----- | :---- |
+| 61616 | JMS |
+| 8161 | UI |
+| 5672 | AMQP |
+| 61613 | STOMP |
+| 1883 | MQTT |
+| 61614 | WS |
+
+- 管理后台:http://localhost:8161/admin/
+- 默认账号名密码:admin/admin
+
+### Asynq
+
+```
+docker pull hibiken/asynqmon:latest
+
+docker run -d \
+ --name asynq \
+ -p 8080:8080 \
+ hibiken/asynqmon:latest --redis-addr=host.docker.internal:6379 --redis-password=123456 --redis-db=1
+```
+
+- 管理后台:[http://localhost:8080](http://localhost:8080/)
+
+## 微服务运行时
+
+### Dapr
+
+```
+docker pull daprio/dapr:latest
+```
+
+## 链路追踪
+
+### Jaeger
+
+```
+docker pull jaegertracing/all-in-one:latest
+
+docker run -itd \
+ --name jaeger \
+ -e COLLECTOR_ZIPKIN_HOST_PORT=:9411 \
+ -e COLLECTOR_OTLP_ENABLED=true \
+ -p 6831:6831/udp \
+ -p 6832:6832/udp \
+ -p 5778:5778 \
+ -p 16686:16686 \
+ -p 4317:4317 \
+ -p 4318:4318 \
+ -p 14250:14250 \
+ -p 14268:14268 \
+ -p 14269:14269 \
+ -p 9411:9411 \
+ jaegertracing/all-in-one:latest
+```
+
+| 端口号 | 协议 | 组件 | 功能 |
+| :----- | :--- | :-------- | :----------------------------------------------------------- |
+| 6831 | UDP | Agent | Thrift-compact协议,接收`jaeger.thrift`数据(大多数 SDK 使用) |
+| 6832 | UDP | Agent | Thrift-binary协议,接收`jaeger.thrift`数据(由 Node.js SDK 使用) |
+| 5775 | UDP | Agent | ~~Thrift-compact协议,接收`zipkin.thrift`数据(仅供旧客户端使用)~~(已弃用) |
+| 5778 | HTTP | Agent | 服务配置接口(采样等) |
+| 16686 | HTTP | Query | Jaeger Web UI的服务前端 |
+| 4317 | gRPC | Collector | 如果启用,通过 gRPC 接收 OpenTelemetry 协议 (OTLP) |
+| 4318 | HTTP | Collector | 如果启用,通过 HTTP 接收 OpenTelemetry 协议 (OTLP) |
+| 14268 | HTTP | Collector | 直接接收`jaeger.thrift`客户端 |
+| 14269 | HTTP | Collector | 提供:健康检查`/`、性能检查`/metrics` |
+| 14250 | HTTP | Collector | 接收`model.proto` |
+| 9411 | HTTP | Collector | 兼容Zipkin的http端点(可选) |
+
+- API:http://localhost:14268/api/traces
+- Zipkin API:http://localhost:9411/api/v2/spans
+- 后台: [http://localhost:16686](http://localhost:16686/)
+
+### Zipkin
+
+```
+docker pull openzipkin/zipkin:latest
+
+docker run -d \
+ --name zipkin \
+ -p 9411:9411 \
+ openzipkin/zipkin:latest
+```
+
+- API:http://localhost:9411/api/v2/spans
+- 后台: [http://localhost:9411](http://localhost:9411/)
+
+### SkyWalking
+
+```
+docker pull apache/skywalking-oap-server:latest
+docker pull apache/skywalking-ui:latest
+
+# 11800端口用于skywalking将应用的服务监控信息收集端口。
+# 12800端口用于skywalking对UI提供查询接口。
+docker run -itd \
+ --name skywalking-oap-server \
+ -e TZ=Asia/Shanghai \
+ -p 12800:12800 \
+ -p 11800:11800 \
+ --link elasticsearch \
+ -e SW_STORAGE=elasticsearch \
+ -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 \
+ apache/skywalking-oap-server:latest
+
+docker run -itd \
+ --name skywalking-ui \
+ -e TZ=Asia/Shanghai \
+ -p 8080:8080 \
+ --link skywalking-oap-server \
+ -e SW_OAP_ADDRESS=skywalking-oap-server:12800 \
+ apache/skywalking-ui:latest
+```
+
+- 后台: [http://localhost:8080](http://localhost:8080/)
+
+### Pinpoint
+
+```
+docker pull pinpointdocker/pinpoint-agent:latest
+```
+
+### Grafana Tempo
+
+```
+docker pull grafana/tempo:latest
+```
+
+## 运维监控
+
+### Kibana
+
+```
+docker pull bitnami/kibana:latest
+
+docker run -d \
+ --name kibana \
+ -p 5601:5601 \
+ -e KIBANA_ELASTICSEARCH_URL=elasticsearch \
+ -e KIBANA_ELASTICSEARCH_PORT_NUMBER=9200 \
+ bitnami/kibana:latest
+```
+
+### Prometheus
+
+```
+docker pull bitnami/prometheus:latest
+docker pull bitnami/pushgateway:latest
+
+docker run -d \
+ --name prometheus-gateway \
+ -p 9091:9091 \
+ bitnami/pushgateway:latest
+
+docker run -d \
+ --name prometheus \
+ -p 9090:9090 \
+ bitnami/prometheus:latest
+```
+
+- Prometheus 后台: [http://localhost:9090](http://localhost:9090/)
+- Pushgateway 后台: [http://localhost:9091](http://localhost:9091/)
+
+### Grafana
+
+```
+docker pull bitnami/grafana:latest
+
+docker run -d \
+ --name grafana \
+ -p 3000:3000 \
+ -e GF_SECURITY_ADMIN_PASSWORD=pass \
+ bitnami/grafana:latest
+```
+
+### Logstash
+
+```
+docker pull bitnami/logstash:latest
+docker pull bitnami/logstash-exporter:latest
+
+docker run -d \
+ --name logstash \
+ -p 8080:8080 \
+ bitnami/logstash:latest
+```
+
+### Fluentd
+
+```
+docker pull bitnami/fluentd:latest
+
+docker run -d \
+ --name fluentd \
+ -p 24224:24224 \
+ -p 24224:24224/udp \
+ -v /data:/opt/bitnami/fluentd/log \
+ bitnami/fluentd:latest
+```
+
+## 流式计算
+
+------
+
+### Spark
+
+```
+docker pull bitnami/spark:latest
+
+docker run -itd \
+ --name spark-standalone \
+ -p 6066:6066 \
+ -p 7077:7077 \
+ -p 8080:8080 \
+ -p 50070:50070 \
+ -e SPARK_MODE=master \
+ -e SPARK_WORKER_CORES=1 \
+ -e SPARK_WORKER_MEMORY=2g \
+ bitnami/spark:latest
+```
+
+- hdfs的web界面:[http://localhost:50070](http://localhost:50070/)
+- Spark界面:[http://localhost:8080](http://localhost:8080/)
+
+### Flink
+
+```
+docker pull flink:latest
+
+docker network create flink-network
+
+docker run -itd \
+ --name flink-jobmanager \
+ --network flink-network \
+ -p 8081:8081 \
+ --env FLINK_PROPERTIES="jobmanager.rpc.address: flink-jobmanager" \
+ flink:latest jobmanager
+
+docker run -itd \
+ --name flink-taskmanager \
+ --network flink-network \
+ --env FLINK_PROPERTIES="jobmanager.rpc.address: flink-jobmanager" \
+ flink:latest taskmanager
+```
+
+- 管理后台: [http://localhost:8081](http://localhost:8081/)
+
+## 对象存储
+
+------
+
+### MinIO
+
+```
+docker pull bitnami/minio:latest
+
+docker network create app-tier --driver bridge
+
+# MINIO_ROOT_USER 最少3个字符
+# MINIO_ROOT_PASSWORD 最少8个字符
+# 第一次运行的时候,服务会自动关闭,手动再启动就可以正常运行了.
+docker run -itd \
+ --name minio-server \
+ -p 9000:9000 \
+ -p 9001:9001 \
+ --env MINIO_ROOT_USER="root" \
+ --env MINIO_ROOT_PASSWORD="123456789" \
+ --env MINIO_DEFAULT_BUCKETS='images,videos' \
+ --env MINIO_FORCE_NEW_KEYS="yes" \
+ --env BITNAMI_DEBUG=true \
+ --volume /usr/local/minio/data:/data \
+ --network app-tier \
+ bitnami/minio:latest
+```
+
+- 管理后台: http://localhost:9001/login
+
+```
+docker pull minio/minio:latest
+
+# MINIO_ROOT_USER 最少3个字符,默认为:minioadmin
+# MINIO_ROOT_PASSWORD 最少8个字符,默认为:minioadmin
+docker run -itd \
+ --name minio-server \
+ -p 9000:9000 \
+ -p 9001:9001 \
+ -e "MINIO_ROOT_USER=root" \
+ -e "MINIO_ROOT_PASSWORD=123456789" \
+ -v /usr/local/minio/data:/data \
+ --network app-tier \
+ minio/minio server /data --console-address ':9001'
+```
+
+- 管理后台: http://localhost:9001/login
+
+## 机器学习
+
+------
+
+### TensorFlow
+
+```
+docker pull bitnami/tensorflow-resnet:latest
+docker pull bitnami/tensorflow-serving:latest
+docker pull bitnami/tensorflow-inception:latest
+
+docker network create app-tier --driver bridge
+
+docker run -d --name tensorflow-serving \
+ --volume /tmp/model-data:/bitnami/model-data \
+ --network app-tier \
+ bitnami/tensorflow-serving:latest
+
+docker run -d --name tensorflow-resnet \
+ --volume /tmp/model-data:/bitnami/model-data \
+ --network app-tier \
+ bitnami/tensorflow-resnet:latest
+```
+
+### PyTorch
+
+```
+docker pull bitnami/pytorch:latest
+```
+
+## API网关
+
+------
+
+### HAProxy
+
+```
+docker pull bitnami/haproxy:latest
+```
+
+### Kong
+
+```
+docker pull bitnami/kong:latest
+```
+
+### Nginx
+
+```
+docker pull bitnami/nginx:latest
+```
+
+### Envoy
+
+```
+docker pull bitnami/envoy:latest
+```
+
+### Caddy
+
+```
+docker pull caddy:latest
+```
+
+### APISIX
+
+```
+docker pull apache/apisix:latest
+docker pull apache/apisix-dashboard:latest
+
+docker run -itd \
+ --name apache-apisix \
+ -p 9080:9080 \
+ -e APISIX_STAND_ALONE=true \
+ --link etcd-standalone \
+ apache/apisix:latest
+```
+
+- 管理后台: http://127.0.0.1:8080/apisix/dashboard
+- 用户密码:admin/admin
+
+### Tyk
+
+```
+docker pull tykio/tyk-gateway:latest
+
+docker run -d \
+ --name tyk_gateway \
+ -p 8080:8080 \
+ -e TYK_GW_SECRET=[YOUR-SECRET] \
+ -v $(pwd)/tyk.conf:/opt/tyk-gateway/tyk.conf \
+ -v $(pwd)/apps:/opt/tyk-gateway/apps \
+ tykio/tyk-gateway:latest
+```
+
+### Gravitee
+
+```
+docker pull graviteeio/apim-gateway:latest
+docker pull graviteeio/apim-management-ui:latest
+docker pull graviteeio/apim-portal-ui:latest
+
+docker run -itd \
+ --publish 82:8082 \
+ --name gateway \
+ --env GRAVITEE_MANAGEMENT_MONGODB_URI=mongodb://username:password@mongohost:27017/dbname \
+ --detach \
+ graviteeio/apim-gateway:latest
+
+docker run -itd \
+ --publish 80:8080 \
+ --env MGMT_API_URL=http://localhost:81/management/organizations/DEFAULT/environments/DEFAULT \
+ --name management-ui \
+ --detach \
+ graviteeio/apim-management-ui:latest
+
+docker run -itd \
+ --publish 80:8080 \
+ --env PORTAL_API_URL=http://localhost:81/portal/environments/DEFAULT \
+ --name portal-ui \
+ --detach \
+ graviteeio/apim-portal-ui:latest
+```
+
+### Traefik
+
+```
+docker pull traefik:latest
+
+docker run -itd \
+ --name traefik-server \
+ -p 8080:8080 \
+ -p 80:80 \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ --link consul-server-standalone \
+ --add-host=host.docker.internal:host-gateway \
+ traefik:latest --api.insecure=true --providers.consul.endpoints="consul-server-standalone:8500"
+```
+
+- 管理后台:[http://localhost:8080](http://localhost:8080/)
\ No newline at end of file
diff --git a/环境/安装教程/GrayLog 分布式日志.md b/环境/安装教程/GrayLog 分布式日志.md
index 4e00b15..eb0ad93 100644
--- a/环境/安装教程/GrayLog 分布式日志.md
+++ b/环境/安装教程/GrayLog 分布式日志.md
@@ -1,3 +1,14 @@
+---
+dg-publish: true
+title: GrayLog 分布式日志
+date: 2024-01-09 21:14:07.464
+updated: 2024-01-11 09:47:32.724
+url: https://blog.hhdxw.top/archives/321
+tags:
+ - GrayLog
+categories:
+ - 日志管理平台
+---
`GrayLog`是一个轻量型的分布式日志管理平台,一个开源的日志聚合、分析、审计、展示和预警工具。在功能上来说,和 ELK类似,但又比 ELK要简单轻量许多。依靠着更加简洁,高效,部署使用简单的优势很快受到许多公司的青睐。
![](https://lsky.hhdxw.top/imghub/2024/01/image-202401091704805912.png)