diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..20c7847 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,7 @@ +*.js linguist-language=java + +*.css linguist-language=java + +*.html linguist-language=java + +*.sh linguist-language=java diff --git "a/Chrome/\346\262\271\347\214\264\350\204\232\346\234\254/README.md" "b/Chrome/\346\262\271\347\214\264\350\204\232\346\234\254/README.md" new file mode 100644 index 0000000..5b4b0f4 --- /dev/null +++ "b/Chrome/\346\262\271\347\214\264\350\204\232\346\234\254/README.md" @@ -0,0 +1,16 @@ +## 解压zip 添加插件 + + chrome浏览器右上角点开竖三点 —> 更多工具 —> 扩展程序 + + 亦或是在chrome浏览器输入框输入chrome://extensions/进入扩展程序 + + 在扩展程序页面,首先打开开发者模式, + + 接着点击加载已解压的扩展程序,选择刚才第一步解压的文件夹 + + +#### 插件网站 + + https://greasyfork.org/zh-CN + + \ No newline at end of file diff --git "a/Chrome/\346\262\271\347\214\264\350\204\232\346\234\254/yh-copy.js" "b/Chrome/\346\262\271\347\214\264\350\204\232\346\234\254/yh-copy.js" new file mode 100644 index 0000000..4dd2a1a --- /dev/null +++ "b/Chrome/\346\262\271\347\214\264\350\204\232\346\234\254/yh-copy.js" @@ -0,0 +1,142 @@ +// ==UserScript== +// @name 🔥🔥🔥文本选中复制🔥🔥🔥 +// @description 解除网站不允许复制的限制,文本选中后点击复制按钮即可复制,主要用于 百度文库 道客巴巴 无忧考网 学习啦 蓬勃范文 +// @namespace https://github.com/WindrunnerMax/TKScript +// @version 2.0.0 +// @author Czy +// @include *://wenku.baidu.com/view/* +// @include *://www.51test.net/show/* +// @include *://www.xuexi.la/* +// @include *://www.xuexila.com/* +// @include *://www.cspengbo.com/* +// @include *://www.doc88.com/* +// @license GPL License +// @require https://cdn.bootcss.com/jquery/2.1.2/jquery.min.js +// @require https://cdn.jsdelivr.net/npm/clipboard@2/dist/clipboard.min.js +// @connect res.doc88.com +// @grant unsafeWindow +// @grant GM_xmlhttpRequest +// ==/UserScript== + +(function () { + 'use strict'; + + function styleInject(css, ref) { + if (ref === void 0) ref = {}; + var insertAt = ref.insertAt; + + if (!css || typeof document === 'undefined') { + return; + } + + 
var head = document.head || document.getElementsByTagName('head')[0]; + var style = document.createElement('style'); + style.type = 'text/css'; + + if (insertAt === 'top') { + if (head.firstChild) { + head.insertBefore(style, head.firstChild); + } else { + head.appendChild(style); + } + } else { + head.appendChild(style); + } + + if (style.styleSheet) { + style.styleSheet.cssText = css; + } else { + style.appendChild(document.createTextNode(css)); + } + } + + var css_248z = "#_copy{width:60px;height:30px;background:#4c98f7;color:#fff;position:absolute;z-index:1000;display:flex;justify-content:center;align-items:center;border-radius:3px;font-size:13px;cursor:pointer}div[id^=reader-helper]{display:none!important}"; + styleInject(css_248z); + + function initEvent($, ClipboardJS) { + $("body").on("mousedown", function (e) { + $("#_copy").remove(); + }); + + document.oncopy = function () {}; + + $("body").on("copy", function (e) { + e.stopPropagation(); + return true; + }); + ClipboardJS.prototype.on('success', function (e) { + $("#_copy").html("复制成功"); + setTimeout(function () { + return $("#_copy").fadeOut(1000); + }, 1000); + e.clearSelection(); + }); + ClipboardJS.prototype.on('error', function (e) { + $("#_copy").html("复制失败"); + setTimeout(function () { + return $("#_copy").fadeOut(1000); + }, 1000); + e.clearSelection(); + }); + } + + var path = ""; + + function init() { + GM_xmlhttpRequest({ + method: "GET", + url: "https://res.doc88.com/assets/js/v2.js", + onload: function onload(response) { + var view = new Function("var view = " + response.responseText.replace("eval", "") + "; return view;"); + path = //.exec(view())[1]; + } + }); + } + + function getSelectedText() { + return unsafeWindow.Viewer[path]; + } + + var doc88 = { + init: init, + getSelectedText: getSelectedText + }; + + function initWebsite($, ClipboardJS) { + if (window.location.href.match(/.*www\.doc88\.com\/.+/)) doc88.init(); + } + + function getSelectedText$1() { + if 
(window.location.href.match(/.*www\.doc88\.com\/.+/)) return doc88.getSelectedText(); + if (window.getSelection) return window.getSelection().toString();else if (document.getSelection) return document.getSelection();else if (document.selection) return document.selection.createRange().text; + return ""; + } + + (function () { + var $ = window.$; + var ClipboardJS = window.ClipboardJS; // https://clipboardjs.com/#example-text + + initEvent($, ClipboardJS); + initWebsite(); + document.addEventListener("mouseup", function (e) { + var copyText = getSelectedText$1(); + if (copyText) console.log(copyText);else return ""; + $("#_copy").remove(); + var template = "\n
\u590D\u5236
\n "); + $("body").append(template); + $("#_copy").on("mousedown", function (event) { + event.stopPropagation(); + }); + $("#_copy").on("mouseup", function (event) { + event.stopPropagation(); + }); + new ClipboardJS('#_copy'); + }); + })(); + /** + * https://www.wenku.zone/ + * http://wenku.baiduvvv.com/ + * https://www.huiyingwu.com/1718/ + */ + +}()); diff --git "a/Chrome/\347\275\221\351\241\265\346\210\252\345\217\226\351\225\277\345\233\276.md" "b/Chrome/\347\275\221\351\241\265\346\210\252\345\217\226\351\225\277\345\233\276.md" new file mode 100644 index 0000000..36499e6 --- /dev/null +++ "b/Chrome/\347\275\221\351\241\265\346\210\252\345\217\226\351\225\277\345\233\276.md" @@ -0,0 +1,8 @@ +#截取页面长图 + +##### 按 F12 +##### 接着我们使用组合键Ctrl+shift+P打开查找文件窗口 + +##### 然后我们在工具栏输入:Capture full size screenshot + +### 这个时候就会开始截图 \ No newline at end of file diff --git "a/Discuz/\345\210\240\351\231\244\350\264\246\345\217\267.md" "b/Discuz/\345\210\240\351\231\244\350\264\246\345\217\267.md" new file mode 100644 index 0000000..3aea61d --- /dev/null +++ "b/Discuz/\345\210\240\351\231\244\350\264\246\345\217\267.md" @@ -0,0 +1,5 @@ +# 删除论坛账号 + location.href=((d=(await(await fetch("./home.php?mod=spacecp&ac=avatar",{credentials:'include'})).text()).match(/\/\/\S+\/images\/ca\S+&ag/g)[0].replace('images/camera.swf?','?m=user&a=delete&'))&&confirm('真的要[永久]删除你的ID?'))?d:''; + + + \ No newline at end of file diff --git a/README.md b/README.md index 0776f7c..394a764 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,4 @@ -# CodingNote -码农笔记、跟上dalao 们的步伐 ~ +# Note + 码农笔记、跟上dalao 们的步伐 ~ + +> 不定时更新 QAQ diff --git a/apk/juicessh-2.1.4.apk b/apk/juicessh-2.1.4.apk new file mode 100644 index 0000000..1d37a33 Binary files /dev/null and b/apk/juicessh-2.1.4.apk differ diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000..039d832 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,98 @@ +# Docker +### Docker 是一个开源的应用容器引擎,基于 Go 语言 并遵从Apache2.0协议开源。 + + 1、简化程序: + 
Docker 让开发者可以打包他们的应用以及依赖包到一个可移植的容器中,然后发布到任何流行的 Linux 机器上,便可以实现虚拟化。 + Docker改变了虚拟化的方式使开发者可以直接将自己的成果放入Docker中进行管理。方便快捷已经是 Docker的最大优势, + 过去需要用数天乃至数周的任务,在Docker容器的处理下,只需要数秒就能完成。 + + 2、避免选择恐惧症: + 如果你有选择恐惧症,还是资深患者。Docker 帮你打包你的纠结!比如 Docker 镜像;Docker 镜像中包含了运行环境和配置, + 所以 Docker 可以简化部署多种应用实例工作。比如 Web 应用、后台应用、数据库应用、大数据应用比如 Hadoop 集群、 + 消息队列等等都可以打包成一个镜像部署。 + + 3、节省开支: + 一方面,云计算时代到来,使开发者不必为了追求效果而配置高额的硬件,Docker 改变了高性能必然高价格的思维定势。 + Docker 与云的结合,让云空间得到更充分的利用。 + 不仅解决了硬件管理的问题,也改变了虚拟化的方式。 + + + 总之牛逼 : + + 1.速度飞快以及优雅的隔离框架 + 2.物美价廉 + 3.CPU/内存的低消耗 + 4.快速开/关机 + 5.跨云计算基础构架 + + +---------------------------- + 食用方法 +---------------------------- + +## 安装 + +>sudo yum -y install docker-io + +//添加docker用户组 + +>sudo groupadd docker + +//将登陆用户加入到docker用户组中 + +>sudo gpasswd -a $USER docker + +//更新用户组 + +>newgrp docker + +//测试docker命令是否可以使用sudo正常使用 + +>docker ps + +//启动 +>service docker start + + +//查看容器启动失败日志 +>docker logs -f -t --tail 20 614f5d8942ef + +//查询容器信息 +> docker inspect 614f5d8942ef + +## Hello Docker + +//创建一个BusyBox,它是一个最小的Linux系统,它提供了该系统的主要功能,不包含一些与GNU相关的功能和选项。 +>sudo docker pull busybox + +// 运行 Hello Docker +>docker run busybox /bin/echo Hello Docker + +//现在,让我们以后台进程的方式运行hello docker +>ojbk=$(docker run -d busybox /bin/sh -c "while true; do echo Hello Docker; sleep 2; done") + +//过10s后查看一下 ojbk 打印了多少个 Hello Docker 运行docker logs $ojbk +>docker logs $ojbk + +//ojbk 每2s 就会打印一次 Hello Docker 你再输入一次 docker logs $ojbk 就会发现 Hello Docker 越来越多了。 + +//停止后台进程方式运行方法 +>docker stop $ojbk + +//重启就使用 restart 即 docker restart $ojbk + +//如果要完全移除容器,需要先将该容器停止,然后才能移除。 +>docker rm $ojbk + +//将容器的状态保存为镜像.注意,镜像名称只能取字符[a-z]和数字[0-9]。 +>docker commit $ojbk new1 + +//查看所有镜像的列表 +>docker images + +//查看所有Docker命令 +>docker help + +![image](http://wx1.sinaimg.cn/mw690/0060lm7Tly1ftvkcuq4kvg300u00umx0.gif) + + \ No newline at end of file diff --git "a/docker/docker \345\256\271\345\231\250\344\270\255\346\227\240\346\263\225\344\275\277\347\224\250ps \345\221\275\344\273\244.md" "b/docker/docker 
\345\256\271\345\231\250\344\270\255\346\227\240\346\263\225\344\275\277\347\224\250ps \345\221\275\344\273\244.md" new file mode 100644 index 0000000..04458c6 --- /dev/null +++ "b/docker/docker \345\256\271\345\231\250\344\270\255\346\227\240\346\263\225\344\275\277\347\224\250ps \345\221\275\344\273\244.md" @@ -0,0 +1,8 @@ +# 一般都是极简镜像 + + ps +>apt-get update && apt-get install procps + + vim + +>apt-get update && apt-get install vim diff --git "a/docker/docker-ce\346\226\260\347\211\210\346\234\254io\345\267\262\347\273\217\345\272\237\345\274\203.md" "b/docker/docker-ce\346\226\260\347\211\210\346\234\254io\345\267\262\347\273\217\345\272\237\345\274\203.md" new file mode 100644 index 0000000..8874729 --- /dev/null +++ "b/docker/docker-ce\346\226\260\347\211\210\346\234\254io\345\267\262\347\273\217\345\272\237\345\274\203.md" @@ -0,0 +1,8 @@ +# 新版docker 安装 + +>yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo + +> sudo yum install docker-ce + +> + \ No newline at end of file diff --git a/docker/docker-es.md b/docker/docker-es.md new file mode 100644 index 0000000..823e59d --- /dev/null +++ b/docker/docker-es.md @@ -0,0 +1,36 @@ +# elasticsearch docker 安装 + + + +``` + +docker run --privileged=true -d -e ES_JAVA_POTS="-Xms512m -Xmx512m" -e "discovery.type=single-node" -p 9200:9200 -p 9300:9300 --name es7.12.1 \ +-v /disk1/dockerContainer/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \ +-v /home/esdata/data:/usr/share/elasticsearch/data \ +-v /etc/localtime:/etc/localtime \ +-v /disk1/dockerContainer/elasticsearch/plugins:/usr/share/elasticsearch/plugins elasticsearch:7.12.1 + + + +``` + + +### + +可以设置容器跟随Docker启动 + +>docker update es7.12.1 --restart=always + + + + +## ik分词器 + +> https://github.com/medcl/elasticsearch-analysis-ik/releases + + +找到对应版本直接下载。 + +然后在 /disk1/dockerContainer/elasticsearch/plugins 目录下创建一个ik文件夹 + +把elasticsearch-analysis-ik-7.12.1.zip 解压进去重启es完事 diff 
--git a/docker/docker-influxdb.md b/docker/docker-influxdb.md new file mode 100644 index 0000000..c185833 --- /dev/null +++ b/docker/docker-influxdb.md @@ -0,0 +1,82 @@ +# docker 安装 influxdb + +### 安装 + +此方式 环境变量有效 但要进入 容器 docker exec -it influxdb /bin/bash + +cd /etc/influxdb 目录才能去修改配置文件 + +``` +docker run --name influxdb --restart=on-failure:3 --privileged=true -d -p 11034:8086 \ +-e INFLUXDB_DB=test \ +-e INFLUXDB_ADMIN_USER=admin -e INFLUXDB_ADMIN_PASSWORD=admin \ +-e INFLUXDB_USER=root -e INFLUXDB_USER_PASSWORD=root \ +-v /disk1/dockerContainer/influxdb:/var/lib/influxdb \ +influxdb:1.8.2 + + +``` + + +### 安装 + +配置了 配置文件映射目录然而 -e的环境变量失效了? + +``` + +docker run --name influxdb --restart=on-failure:3 --privileged=true -d -p 11034:8086 \ +-e INFLUXDB_DB=test \ +-e INFLUXDB_ADMIN_USER=admin -e INFLUXDB_ADMIN_PASSWORD=admin \ +-e INFLUXDB_USER=root -e INFLUXDB_USER_PASSWORD=root \ +-v /disk1/dockerContainer/influxdb/conf:/etc/influxdb \ +-v /disk1/dockerContainer/influxdb/databases:/var/lib/influxdb \ +influxdb:1.8.2 + + +``` + +>setfacl -m u:username:rwx -R /disk1/dockerContainer/influxdb + +因此 手动 在/disk1/dockerContainer/influxdb/databases 下 创建 + +mv data meta wal + +在 /disk1/dockerContainer/influxdb/conf下 创建配置文件 +vi influxdb.conf + +``` +[meta] + dir = "/var/lib/influxdb/meta" + +[data] + dir = "/var/lib/influxdb/data" + engine = "tsm1" + wal-dir = "/var/lib/influxdb/wal" + + series-id-set-cache-size = 100 + +[http] + enabled = true + bind-address = ":8086" + auth-enabled = false + +``` + +重启docker + +>docker restart xxx + +连上 influxdb 创建用户 + +>create user "root" with password 'root' with ALL PRIVILEGES + +修改密码 + +>SET PASSWORD FOR = '' + +然后把配置文件 auth-enabled = true + + +重启docker + +>docker restart xxx diff --git a/docker/docker-minio.md b/docker/docker-minio.md new file mode 100644 index 0000000..9a289c0 --- /dev/null +++ b/docker/docker-minio.md @@ -0,0 +1,54 @@ +## minio + +### 新版本 + +``` +docker run -itd -p 9000:9000 -p 9001:9001 --name minio \ +-e 
"MINIO_ROOT_USER=ojbk" \ +-e "MINIO_ROOT_PASSWORD=ojbk.plus" \ +-v /disk1/dockerContainer/minio/data:/data \ +-v /disk1/dockerContainer/minio/config:/root/.minio \ +-d minio/minio server /data --console-address ":9001" + + +``` + +### 旧版本 + + minio/minio 会自动拉去最新版 所以请用下面的指定版本方式 + + 新旧版本的区别大概就是 + + Console 登录账号:MINIO_ROOT_USER替代了MINIO_SECRET + + Console 登录密码:MINIO_ROOT_PASSWORD替代了MINIO_SECRET_KEY + + 另外一处变化则为web管理的地址和API地址已经分离,需要参数配置--console-address ":9001" + + +``` + +docker run -itd -p 9000:9000 --name minio \ +-e "MINIO_ACCESS_KEY=ojbk" \ +-e "MINIO_SECRET_KEY=ojbk.plus" \ +-v /disk1/dockerContainer/minio/data:/data \ +-v /disk1/dockerContainer/minio/config:/root/.minio \ +minio/minio server /data +``` + + +### 指定版本 + +``` + +docker pull minio/minio:RELEASE.2021-06-17T00-10-46Z +docker tag minio/minio:RELEASE.2021-06-17T00-10-46Z minio/minio:ojbk + +docker run -itd -p 10128:9000 --name minio \ +-e "MINIO_ACCESS_KEY=ojbk" \ +-e "MINIO_SECRET_KEY=ojbk.plus" \ +-v /disk1/dockerContainer/minio/data:/data \ +-v /disk1/dockerContainer/minio/config:/root/.minio \ +minio/minio:ojbk server /data + +``` diff --git a/docker/docker-mongo.md b/docker/docker-mongo.md new file mode 100644 index 0000000..767b9dd --- /dev/null +++ b/docker/docker-mongo.md @@ -0,0 +1,70 @@ +# docker-mongo5.0.6 + +1.拉取mongo 镜像 + +>docker pull mongo:5.0.6 + +2. 
初始化启动 + +``` + +docker run --restart=on-failure:3 --privileged=true -d \ +--name mongo5.0.6 \ +-v /disk1/dockerContainer/mongodb/datadb:/data/db \ +-v /etc/localtime:/etc/localtime \ +-p 27017:27017 \ +-e MONGO_INITDB_ROOT_USERNAME=admin \ +-e MONGO_INITDB_ROOT_PASSWORD=qq123456 \ +mongo:5.0.6 + + + +``` + + +### 后续使用 + +1.启动容器 + +> docker start mongo5.0.6 + +2.停止容器 + +>docker stop mongo5.0.6 + + + +#### 指定配置文件 + +``` +docker run --restart=on-failure:3 --privileged=true -d \ +--name mongotest \ +-v /disk1/dockerContainer/mongodbtest/datadb:/data/db \ +-v /disk1/dockerContainer/mongodbtest/conf:/data/configdb \ +-v /etc/localtime:/etc/localtime \ +-p 27027:27027 \ +mongo:5.0.6 -f /data/configdb/mongo.conf + +``` + +mongo.conf + +后面要开启security鉴权 + +``` +systemLog: + destination: file + path: /var/log/mongodb/mongod.log + logAppend: true +storage: + dbPath: /data/db +net: + port: 27027 + bindIp: 0.0.0.0 +#security: + #authorization: enabled + +# how the process runs +processManagement: + timeZoneInfo: /usr/share/zoneinfo +``` diff --git a/docker/docker-mysql.md b/docker/docker-mysql.md new file mode 100644 index 0000000..3164e34 --- /dev/null +++ b/docker/docker-mysql.md @@ -0,0 +1,89 @@ +# docker-mysql8.0.23 + +1.拉取mysql 镜像 + +>docker pull mysql:8.0.23 + +2.创建 文件夹 + +>mkdir -p /disk1/dockerContainer/mysql/data /disk1/dockerContainer/mysql/logs /disk1/dockerContainer/mysql/conf + + +3. 
创建 my.cnf 文件 +>vi my.cnf + +``` + +[client] + +#socket = /usr/mysql/mysqld.sock +default-character-set = utf8mb4 + +[mysqld] +pid-file = /var/run/mysqld/mysqld.pid +socket = /var/run/mysqld/mysqld.sock +datadir = /var/lib/mysql + +#slow_query_log = 1 + +sql_mode = STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION + +max_connections = 2000 +character_set_server = utf8mb4 +collation_server = utf8mb4_bin +# +secure-file-priv= +# Disabling symbolic-links is recommended to prevent assorted security risks +symbolic-links=0 + +# Custom config should go here +!includedir /etc/mysql/conf.d/ + +``` + +4.查看mysql 镜像 获取 id + +>docker images + +5. 获得 cbe8815cbea8 取前四位即可 并启动mysql + +``` + +docker run --restart=on-failure:3 --privileged=true -d \ +--name mysql8.0.23 \ +-v /disk1/dockerContainer/mysql/conf/my.cnf:/etc/mysql/my.cnf \ +-v /disk1/dockerContainer/mysql/data:/var/lib/mysql \ +-v /disk1/dockerContainer/mysql/logs:/logs \ +-p 10131:3306 -e MYSQL_ROOT_PASSWORD=123456 cbe8 + + +``` + +6.进入容器内部 +> docker exec -it mysql8.0.23 /bin/bash + +7.连接mysql (第五步指定了密码为123456)并修改密码 + +>mysql -u root -p + +>mysql> use mysql + +>alter user 'root'@'%' identified with mysql_native_password by '123456'; + +>flush privileges; + +注意 : 可将实际的使用的密码替换 123456 + +8. 退出容器 + +>exit + +### 后续使用 + +1.启动容器 + +> docker start mysql8.0.23 + +2.停止容器 + +>docker stop mysql8.0.23 diff --git a/docker/docker-nexus3.md b/docker/docker-nexus3.md new file mode 100644 index 0000000..7efb1fc --- /dev/null +++ b/docker/docker-nexus3.md @@ -0,0 +1,243 @@ +# docker-nexus3 + + docker 构建一个Maven私服 + + +1.拉取nexus3 + +>docker pull sonatype/nexus3 + +2.创建数据目录 根据你自己的需求变更路径 + +>mkdir -p /usr/local/nexus3/nexus-data && chown -R 200 /usr/local/nexus3/nexus-data + +3.启动nexus + +>docker run -d -p 8081:8081 --privileged=true --name nexus -v /usr/local/nexus3/nexus-data:/nexus-data 3aa3 + +4. 
设置nexus3的默认密码 + +>cd /usr/local/nexus3/nexus-data + +>cat admin.password + + 会得到一串 密码 复制即可 + + 开浏览器访问 http://ip:8081/ + + 右上角登录 输入 账号 admin + 密码 刚刚复制的那一串 + + 然后重置密码即可 + +5. 默认仓库说明 + +maven-central:maven中央库,默认从https://repo1.maven.org/maven2/拉取jar + +maven-releases:私库发行版jar,初次安装请将Deployment policy设置为Allow redeploy + +maven-snapshots:私库快照(调试版本)jar + +maven-public:仓库分组,把上面三个仓库组合在一起对外提供服务,在本地maven基础配置settings.xml或项目pom.xml中使用 + +仓库类型 + +Group:这是一个仓库聚合的概念,用户仓库地址选择Group的地址,即可访问Group中配置的,用于方便开发人员自己设定的仓库。maven-public就是一个Group类型的仓库,内部设置了多个仓库,访问顺序取决于配置顺序,3.x默认Releases,Snapshots, Central,当然你也可以自己设置。 + +Hosted:私有仓库,内部项目的发布仓库,专门用来存储我们自己生成的jar文件 + +3rd party:未发布到公网的第三方jar (3.x去除了) + +Snapshots:本地项目的快照仓库 + +Releases: 本地项目发布的正式版本 + +Proxy:代理类型,从远程中央仓库中寻找数据的仓库(可以点击对应的仓库的Configuration页签下Remote Storage属性的值即被代理的远程仓库的路径),如可配置阿里云maven仓库 + +Central:中央仓库 + +Apache Snapshots:Apache专用快照仓库(3.x去除了) + + +6.增加代理源 + +点击设置那个图标 --> Repositories --> Create repository --> maven2 (proxy) + + +Name 填写 aliyun + +Remote storage: 填写 http://maven.aliyun.com/nexus/content/groups/public + +Not found cache TTL: 修改为 288000 + +保存完事 以此类推 + +``` + +1. aliyun +http://maven.aliyun.com/nexus/content/groups/public +2. apache_snapshot +https://repository.apache.org/content/repositories/snapshots/ +3. apache_release +https://repository.apache.org/content/repositories/releases/ +4. atlassian +https://maven.atlassian.com/content/repositories/atlassian-public/ +5. central.maven.org +http://central.maven.org/maven2/ +6. datanucleus +http://www.datanucleus.org/downloads/maven2 +7. maven-central (安装后自带,仅需设置Cache有效期即可) +https://repo1.maven.org/maven2/ +8. nexus.axiomalaska.com +http://nexus.axiomalaska.com/nexus/content/repositories/public +9. oss.sonatype.org +https://oss.sonatype.org/content/repositories/snapshots +10.pentaho +https://public.nexus.pentaho.org/content/groups/omni/ + + +``` + +7. 
设置maven-public 将这些代理加入Group + +点击设置那个图标 --> Repositories --> maven-public +也可以直接使用下面的链接 +http://nexus3-host:8081/#admin/repository/repositories:maven-public + +找到 Member repositories: + +把我们自己添加的代理移动到左边的 Members + +建议 把 aliyun 置顶 然后 保存完事 + + + +6.在我们maven的settings.xml增加部分配置 + + 注意 nexus3-host 这个是我在hosts文件 中配置了hosts + 如果没有配置默认使用ip即可 + + +``` + + + + nexus-releases + admin + admin123 + + + + nexus-snapshots + admin + admin123 + + + + + + OjbKMirror + * + OjbK Repository Mirror. + http://nexus3-host:8081/repository/maven-public/ + + + + + + OjbK + + + nexus + Public Repositories + http://nexus3-host:8081/repository/maven-public/ + + true + + + + + central + Central Repositories + http://nexus3-host:8081/repository/maven-central/ + + true + + + false + + + + + release + Release Repositories + http://nexus3-host:8081/repository/maven-releases/ + + true + + + false + + + + + snapshots + Snapshot Repositories + http://nexus3-host:8081/repository/maven-snapshots/ + + true + + + true + + + + + + + plugins + Plugin Repositories + http://nexus3-host:8081/repository/maven-public/ + + + + + + + +``` + +7. maven 项目的 pom文件中加入 + +``` + + + + nexus-releases + Nexus Release Repository + http://nexus3-host:8081/repository/maven-releases/ + + + nexus-snapshots + Nexus Snapshot Repository + http://nexus3-host:8081/repository/maven-snapshots/ + + + +``` + +这样就可以使用 mvn deploy 上传到私服了 + + + +8. 
注意事项 + +明明配置了 以上内容 deploy 却报 401无权限 +检查默认的settings.xml 是否已经配置过以上参数 + +>mvn help:effective-settings + +如果 idea 底部的Terminal 执行失败 可尝试 左侧的maven插件使用Lifecycle中deploy + + + diff --git "a/docker/docker-nginx-\350\207\252\347\274\226\350\257\221\347\211\210.md" "b/docker/docker-nginx-\350\207\252\347\274\226\350\257\221\347\211\210.md" new file mode 100644 index 0000000..4f5d608 --- /dev/null +++ "b/docker/docker-nginx-\350\207\252\347\274\226\350\257\221\347\211\210.md" @@ -0,0 +1,84 @@ +# 服务器安装 docker-nginx + +需要准备 Dockerfile + +>vi Dockerfile + +``` + +FROM centos:7 +MAINTAINER wxm i@ojbk.plus + +#RUN yum makecache + +# 准备环境 安装 wget +#RUN yum -y update +# 安装依赖 +RUN yum -y install pcre pcre-devel +RUN yum -y install zlib zlib-devel +RUN yum -y install openssl openssl-devel +RUN yum -y install gcc gcc-c++ autoconf automake +RUN yum -y install zlib zlib-devel openssl openssl-devel pcre-devel +RUN yum -y install wget +RUN yum clean all + +# 下载Nginx源码包 +RUN wget http://nginx.org/download/nginx-1.18.0.tar.gz +RUN tar zxvf nginx-1.18.0.tar.gz && \ + rm -rf nginx-1.18.0.tar.gz + +# 配置及编译Nginx +RUN cd nginx-1.18.0 && ./configure --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module --with-http_ssl_module --with-threads --with-http_gzip_static_module --with-http_sub_module --with-http_v2_module && make && make install + +# 删除临时文件 +RUN rm -rf nginx-1.18.0 + +# 运行Nginx +RUN /usr/local/nginx/sbin/nginx + +# 暴露80 443端口 +EXPOSE 80 443 + +# 启动Nginx +CMD ["/usr/local/nginx/sbin/nginx", "-g", "daemon off;"] + + + +``` + +注意末尾有个 . + +>docker build -t dhy:nginx1.18 . 
+ +经过漫长的等待 + +>docker images + +就可以看到 nginx 已经打包成镜像了 + +当然 这里要 先搞一份nginx 配置文件到 /disk1/dockerContainer/nginx/conf/目录下 + +然后执行 + +docker run --privileged=true -d -p 10130:80 -p 10223:443 --name nginx1.18 \ +-v /disk1/dockerContainer/nginx/html:/usr/local/nginx/html \ +-v /disk1/dockerContainer/nginx/conf/nginx.conf:/usr/local/nginx/conf/nginx.conf \ +-v /disk1/dockerContainer/nginx/logs:/usr/local/nginx/logs 9413 + + +进入 容器 + +docker exec -it nginx1.18 /bin/bash + +修改一下系统时间 + +ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime +echo "Asia/Shanghai" > /etc/timezone + + + + +## nginx 资料 + +https://github.com/xx13295/MD-Note/tree/master/nginx + diff --git a/docker/docker-nginx.md b/docker/docker-nginx.md new file mode 100644 index 0000000..c40fd3c --- /dev/null +++ b/docker/docker-nginx.md @@ -0,0 +1,28 @@ +# docker-nginx + +1.拉取nginx 镜像 + +>docker pull nginx:1.18 + +2.查看nginx 镜像 获取 id + +>docker images + +3. 获得 c2c45d506085 取前四位即可 + +>docker run -d --name nginx-test -p 80:80 c2c4 + +4. 创建目录 dockerContainer 准备将容器里的文件拷贝出来 +> mkdir -p /disk1/dockerContainer/nginx/html /disk1/dockerContainer/nginx/logs /disk1/dockerContainer/nginx/conf + +5. docker ps -a 获得容器id dfe7287a20e9 将容器里的文件拷贝到相应的目录 + +> docker cp dfe7287a20e9:/etc/nginx/nginx.conf /disk1/dockerContainer/nginx/conf + +6. 把nginx-test 停了 先docker ps -a 获取id 再 kill 在 rm + +7. 
启动 + +>docker run -d -p 10130:80 --name nginx-1.18 -v /disk1/dockerContainer/nginx/html:/usr/share/nginx/html -v /disk1/dockerContainer/nginx/conf/nginx.conf:/etc/nginx/nginx.conf -v /disk1/dockerContainer/nginx/logs:/var/log/nginx c2c4 + + diff --git a/docker/docker-oracle.md b/docker/docker-oracle.md new file mode 100644 index 0000000..ceb5747 --- /dev/null +++ b/docker/docker-oracle.md @@ -0,0 +1,43 @@ +# oracle + + 未测试 + + +>docker pull registry.cn-hangzhou.aliyuncs.com/woqutech/oracle-database-11.2.0.4.0-ee + +``` + +docker run -d --name oracledb \ +-p 10136:1521 \ +-e ORACLE_SID=orcl \ +-e ORACLE_PWD=oracle \ +-e ORACLE_CHARACTERSET=ZHS16GBK \ +-e SGA_SIZE=8G \ +-e PGA_SIZE=8G \ +-e DB_ROLE=primary \ +-e ENABLE_ARCH=true \ +-v /home/oracledata/data:/opt/oracle/oradata \ +registry.cn-hangzhou.aliyuncs.com/woqutech/oracle-database-11.2.0.4.0-ee + + +``` + + +``` + + sqlplus /nolog + + conn /as sysdba + + + alter user system identified by 123456; + + alter user sys identified by 123456; + + alter user scott identified by 123456; + alter user scott account unlock; + + ALTER PROFILE DEFAULT LIMIT PASSWORD_LIFE_TIME UNLIMITED; + + +``` diff --git a/docker/docker-rabbitMq.md b/docker/docker-rabbitMq.md new file mode 100644 index 0000000..df24e5e --- /dev/null +++ b/docker/docker-rabbitMq.md @@ -0,0 +1,63 @@ +# docker-rabbitMq + +1.拉取rabbitMq 镜像 + +>docker pull rabbitmq:3.8.14 + +2.创建 文件夹 + +>mkdir -p /disk1/dockerContainer/rabbitmq/data /disk1/dockerContainer/rabbitmq/conf /disk1/dockerContainer/rabbitmq/log + + +3. 编辑 vi /disk1/dockerContainer/rabbitmq/conf/rabbitmq.conf + +``` + +#限制guest用户远程访问 true禁止 false开启 +loopback_users.guest = true +#rabbitmq 端口 +listeners.tcp.default = 5672 +#管理界面端口 +management.tcp.port = 15672 + + + +``` + + 注意一点 + 如果你要使用 guest用户在web管理界面上创建其他用户 + loopback_users.guest可以先选择fasle + 创建完后再将其改为true + + +4. 
启动rabbitmq + +``` + + +docker run --restart=on-failure:3 --privileged=true -d --name rabbitmq3.8 -p 10133:5672 -p 10134:15672 \ +-v /disk1/dockerContainer/rabbitmq/data:/var/lib/rabbitmq \ +-v /disk1/dockerContainer/rabbitmq/log:/var/log/rabbitmq \ +-v /disk1/dockerContainer/rabbitmq/conf:/etc/rabbitmq f83a + + +``` + +5.进入容器 启动 rabbitmq_management + +>docker exec -it rabbitmq3.8 /bin/bash + +启动web管理界面 + +>rabbitmq-plugins enable rabbitmq_management + + +### 后续使用 + +1.启动容器 + +> docker start rabbitmq3.8 + +2.停止容器 + +>docker stop rabbitmq3.8 diff --git a/docker/docker-redis.md b/docker/docker-redis.md new file mode 100644 index 0000000..5dc68f8 --- /dev/null +++ b/docker/docker-redis.md @@ -0,0 +1,81 @@ +# docker-redis + +## redis 6.2.6 + +请参照下面的5.0.5 步骤 + +``` +docker run --restart=on-failure:3 --privileged=true -d \ +--name redis6.2.6 \ +-v /disk1/dockerContainer/redis/redis.conf:/etc/redis/redis.conf \ +-v /disk1/dockerContainer/redis/data:/data \ +-v /etc/localtime:/etc/localtime \ +-p 6379:6379 redis:6.2.6 redis-server /etc/redis/redis.conf --appendonly yes + +``` + + + +1.拉取redis 镜像 + +>docker pull redis:5.0.5 + +2.创建 文件夹 + +>mkdir -p /disk1/dockerContainer/redis/data + + +3. 复制相应 redis.conf 文件到 /disk1/dockerContainer/redis下 + +4. 
启动redis + +``` + +docker run --restart=on-failure:3 --privileged=true -p 6379:6379 --name redis5.0.5 -v /disk1/dockerContainer/redis/redis.conf:/etc/redis/redis.conf -v /disk1/dockerContainer/redis/data:/data -d redis:5.0.5 redis-server /etc/redis/redis.conf --appendonly yes + +``` + + +### 后续使用 + +1.启动容器 + +> docker start redis5.0.5 + +2.停止容器 + +>docker stop redis5.0.5 + + + +## 进阶 + + +配置文件注意修改 aclfile + +>touch /disk1/dockerContainer/redis/users.acl + + +配置文件注意修改 logfile + +>touch /disk1/dockerContainer/redis/redis.log + + 目前涉及到容器内部权限问题 暂未得到优雅的解决方案 + --privileged=true 并未对 redis.conf中的引用文件获得权限 + 无脑先进容器授权可解决redis.log读取文件。 + 但问题 acl save 无法良好支持 + 可手动编辑users.acl文件来使用acl功能。 + + + +``` +docker run --restart=on-failure:3 --privileged=true -d \ +--name redis6.2.6 \ +-v /disk1/dockerContainer/redis/redis.conf:/etc/redis/redis.conf \ +-v /disk1/dockerContainer/redis/users.acl:/etc/redis/users.acl \ +-v /disk1/dockerContainer/redis/redis.log:/etc/redis/redis.log \ +-v /disk1/dockerContainer/redis/data:/data \ +-v /etc/localtime:/etc/localtime \ +-p 6379:6379 redis:6.2.6 redis-server /etc/redis/redis.conf --appendonly yes + +``` diff --git a/docker/redis.conf b/docker/redis.conf new file mode 100644 index 0000000..49bc95e --- /dev/null +++ b/docker/redis.conf @@ -0,0 +1,635 @@ +#指定redis只能接受来自此IP绑定的网卡的请求,注意此默认值默认外网是不可访问的 +#bind 127.0.0.1 + +#是否开启保护模式。如果没有指定bind和密码,redis只会本地进行访问,拒绝外部访问。 +protected-mode no + +#默认端口,建议生产环境不要使用默认端口避免被恶意扫描到 +port 6379 + +#密码 +requirepass docker.mima.redis + +#TCP连接中已完成队列(完成三次握手之后)的长度 +#backlog其实是一个连接队列,backlog队列总和=未完成三次握手队列 + 已完成三次握手队列。 +#在高并发环境下你需要一个高backlog值来避免慢客户端连接问题。 +tcp-backlog 511 + +#客户端连接空闲超过timeout将会被断开,为0则断开(0表示不断,一直连着) +timeout 0 + +#tcp keepalive参数 +tcp-keepalive 300 + +#是否后台启动 +daemonize no + +#可以通过upstart和systemd管理Redis守护进程 +#选项: +#supervised no - 没有监督互动 +#supervised upstart - 通过将Redis置于SIGSTOP模式来启动信号 +#supervised systemd - signal systemd将READY = 1写入$ NOTIFY_SOCKET +#supervised auto - 检测upstart或systemd方法基于 
UPSTART_JOB或NOTIFY_SOCKET环境变量 +supervised no + +#配置PID文件路径 +pidfile /var/run/redis_6379.pid + +#日志级别 +#参数: +# debug +# verbose +# notice +# warning +loglevel notice + +logfile "" + +#数据库的数量 +databases 16 + +always-show-logo yes + +save 900 1 +save 300 10 +save 60 10000 + +#持久化出现错误后,是否依然进行继续进行工作 +stop-writes-on-bgsave-error yes + +#是否校验rdb文件 +rdbcompression yes + +#使用压缩rdb文件,rdb文件压缩使用LZF压缩算法 +rdbchecksum yes + +#rdb文件名称 +dbfilename dump.rdb + +#rdb使用上面的dbfilename配置指令的文件名保存到这个目录 +dir ./ + + +slave-serve-stale-data yes + +slave-read-only yes + +#同步策略: 磁盘或socket,默认磁盘方式 +repl-diskless-sync no + +#如果非磁盘同步方式开启,可以配置同步延迟时间 +#以等待master产生子进程通过socket传输RDB数据给slave +#默认值为5秒,设置为0秒则每次传输无延迟。 +repl-diskless-sync-delay 5 + +#是否在slave套接字发送SYNC之后禁用 TCP_NODELAY +#如果选择yes,Redis将使用更少的TCP包和带宽来向slaves发送数据。 +#但是这将使数据传输到slave上有延迟,Linux内核的默认配置会达到40毫秒。 +#如果选择no,数据传输到salve的延迟将会减少但要使用更多的带宽。 +#默认我们会为低延迟做优化,但高流量情况或主从之间的跳数过多时,可以设置为yes +repl-disable-tcp-nodelay no + +#优先级 +slave-priority 100 + + + +#内存满逐出 +lazyfree-lazy-eviction no +#过期key删除 +lazyfree-lazy-expire no +#内部删除,比如rename oldkey newkey时,如果newkey存在需要删除newkey +lazyfree-lazy-server-del no +#接收完RDB文件后清空数据选项 +slave-lazy-flush no + +#每次启动时Redis都会先把这个文件的数据读入内存里,先忽略RDB文件 +appendonly yes + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# appendfsync always +appendfsync everysec +# appendfsync no + +no-appendfsync-on-rewrite no + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +aof-load-truncated yes + +aof-use-rdb-preamble no + +#Lua 脚本的最大执行毫秒数 +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. 
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. 
Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. 
+# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following two options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-bus-port +# +# Each instruct the node about its address, client port, and cluster message +# bus port. The information is then published in the header of the bus packets +# so that other nodes will be able to correctly map the address of the node +# publishing the information. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. 
+# +# Note that when remapped, the bus port may not be at the fixed offset of +# clients port + 10000, so you can specify any port and bus-port depending +# on how they get remapped. If the bus-port is not set, a fixed offset of +# 10000 will be used as usually. +# +# Example: +# +# cluster-announce-ip 10.1.1.5 +# cluster-announce-port 6379 +# cluster-announce-bus-port 6380 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. 
+# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. 
+# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. +# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. 
Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 
+hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). 
+# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Client query buffers accumulate new commands. They are limited to a fixed +# amount by default in order to avoid that a protocol desynchronization (for +# instance due to a bug in the client) will lead to unbound memory usage in +# the query buffer. However you can configure it here if you have very special +# needs, such us huge multi/exec requests or alike. +# +# client-query-buffer-limit 1gb + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited ot 512 mb. However you can change this limit +# here. +# +# proto-max-bulk-len 512mb + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. 
Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. +# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. 
This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A Special value of 0 means to +# decay the counter every time it happens to be scanned. +# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested +# even in production and manually tested by multiple engineers for some +# time. +# +# What is active defragmentation? +# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. 
+# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in an "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. 
+ +# Enabled active defragmentation +# activedefrag yes + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage +# active-defrag-cycle-min 25 + +# Maximal effort for defrag in CPU percentage +# active-defrag-cycle-max 75 + + diff --git "a/docker/\344\277\256\346\224\271docker\351\225\234\345\203\217\345\255\230\346\224\276\344\275\215\347\275\256.md" "b/docker/\344\277\256\346\224\271docker\351\225\234\345\203\217\345\255\230\346\224\276\344\275\215\347\275\256.md" new file mode 100644 index 0000000..3053ce6 --- /dev/null +++ "b/docker/\344\277\256\346\224\271docker\351\225\234\345\203\217\345\255\230\346\224\276\344\275\215\347\275\256.md" @@ -0,0 +1,20 @@ +# 修改 docker容器位置 + +注意如果已经有容器 和镜像了 先统统删除 做好数据备份 + +vi /etc/docker/daemon.json + +{ +"registry-mirrors": ["https://2btbrkw6.mirror.aliyuncs.com"], +"storage-driver": "overlay2", +"graph":"/disk1/dockerRes/docker", +"storage-opts": ["overlay2.override_kernel_check=true"] +} + +其中 /disk1/dockerRes/docker 替换为你需要更改的地址 + + +重启生效 + +>systemctl restart docker + diff --git a/drools/Drools-Global.md b/drools/Drools-Global.md new file mode 100644 index 0000000..1db66a5 --- /dev/null +++ b/drools/Drools-Global.md @@ -0,0 +1,129 @@ +# 规则引擎Drools 规则文件关键字 Global + +### Global全局变量 + + global关键字用于在规则文件中定义全局变量, + 它可以让应用程序的对象在规则文件中能够被访问。 + 可以用来为规则文件提供数据或服务。 + +语法结构为: + + global 对象类型 对象名称 + +举例: + +drl 文件 + +```aidl + +package helloworld + + +global java.util.List myGlobalList //定义一个集合类型的全局变量 +global java.lang.Integer count //定义一个包装类型的全局变量 +global io.springboot.drools.service.GlobalService globalService //定义一个JavaBean类型的全局变量 + +rule "rule_global" + when + eval(true) + then + myGlobalList.add("ojbk"); + System.err.println(count); + 
globalService.get(count); +end + +``` + +java 代码 + +```aidl + + @Autowired + private GlobalService globalService; + + @Autowired + private KieBase kieBase; + + @RequestMapping("/global") + public String global(){ + List list = new ArrayList<>(); + + KieSession session = kieBase.newKieSession(); + + session.setGlobal("globalService", globalService); + session.setGlobal("count", 6); + session.setGlobal("myGlobalList", list); + + session.fireAllRules(); + session.dispose(); + + System.err.println(list); + return "ok"; + } + +``` + +```aidl + +public interface GlobalService { + + void get(Integer count); + +} + + +@Service +public class GlobalServiceImpl implements GlobalService { + @Override + public void get(Integer name) { + + System.err.println("使用 global 方式 get count: " + count); + + } +} + +``` + +以上代码请自行在代码中加入实验 动手试试比光看强 [传送门](https://github.com/xx13295/springboot-drools-redis) + +观看源码 可知 setGlobal 本质上其实是一个map + +因此 drl文件中声明的global 要和java代码中的类型、变量名保持一致 + +MapGlobalResolver 它实现了GlobalResolver + +```aidl +public class MapGlobalResolver implements GlobalResolver, Globals, Externalizable { + + private static final long serialVersionUID = 510l; + + private Map map; + + private Globals delegate; + + public MapGlobalResolver() { + this.map = new ConcurrentHashMap(); + } + + public MapGlobalResolver(Map map) { + if (map instanceof ConcurrentHashMap) { + this.map = map; + } else { + this.map = new ConcurrentHashMap(); + this.map.putAll(map); + } + } + + public void setGlobal(String identifier, Object value) { + this.map.put( identifier, value ); + } + + ······ + +``` + +注意: + + global是不会放到工作内存中的如果我们在定义全局变量时有两个规则文件中的都用到了同一个全局变量 + 这两个global 的内容不会因为其他调用的改变而改变。 + 因此 global 不用来做数据共享, session会影响global的用法 diff --git a/drools/README.md b/drools/README.md new file mode 100644 index 0000000..45ce12a --- /dev/null +++ b/drools/README.md @@ -0,0 +1,164 @@ +# 规则引擎 Drools + +## 1.规则引擎概述 + + 规则引擎,全称为业务规则管理系统,英文名为BRMS(即Business Rule Management System)。 + 
规则引擎的主要思想是将应用程序中的业务决策部分分离出来,并使用预定义的语义模块编写业务决策(业务规则), + 由用户或开发者在需要时进行配置、管理。 + + 需要注意的是规则引擎并不是一个具体的技术框架,而是指的一类系统,即业务规则管理系统。 + 目前市面上具体的规则引擎产品有:drools、VisualRules、iLog等。 + + 规则引擎实现了将业务决策从应用程序代码中分离出来,接收数据输入,解释业务规则,并根据业务规则做出业务决策。 + 规则引擎其实就是一个输入输出平台。 + +## 2. 使用规则引擎的优势 + 使用规则引擎的优势如下: + + 1、业务规则与系统代码分离,实现业务规则的集中管理 + + 2、在不重启服务的情况下可随时对业务规则进行扩展和维护 + + 3、可以动态修改业务规则,从而快速响应需求变更 + + 4、规则引擎是相对独立的,只关心业务规则,使得业务分析人员也可以参与编辑、维护系统的业务规则 + + 5、减少了硬编码业务规则的成本和风险 + + 6、使用规则引擎提供的规则编辑工具,使复杂的业务规则实现变得的简单 + +### 三、简单介绍 + +1、术语解释 + +* Rule:一条规则可以看作是if else 一组判断和一组输出 + +* RuleBase: RuleBase包含一个或多个规则包,它们已经被校验和编译完成,是可以序列化的 + +* Package: 规则包,是规则以及其它相关结构的一个集合,包必须有一个名称空间,并且使用标准的java约定进行命名 + +* WorkingMemory: 用户工作区,包含用户的数据和相关Rule的引用 + +* Facts: Facts就是规则中用到的输入,Facts可以是任何规则可以存取的Java对象,规则引擎完全不会克隆对象,它仅仅是保存对对象的一个引用/指针 + +2、规则文件详解 + + 目前市面上常用的drools规则文件通常是以 .drl 扩展名结尾 + + 在一个drl文件中可以包含多个规则,函数等等。 + +规则文件的构成: + + package package-name //定义包名 + + imports //导入java包 + + globals //定义全局变量,如 global java.util.List myGlobalList + + functions //定义函数 + + rules //一系列的规则 + + querys //一系列查询 + +规则的构成: + + package "packageName" + + imports com.xxx.Xxx + + rule "ruleName" + attributes + when + LHS + then + RHS + end + +查询的构成 + + query "queryName" + + LHS + + end + + +说明: + + LHS是规则的条件部分,可以定义变量 + + RHS是允许Java语义代码,RHS中的多条语句实质上是一个规则,只有满足全部语句才符合规则 + + 任何在LHS中绑定的变量可以在RHS中使用 + +3、规则文件示例解读 + +```aidl + +package helloworld + +import io.springboot.drools.model.Employee + +rule "rule_employee_raise" + agenda-group "group-rule2" + no-loop true + when + $employee:Employee(year >= 3 && salary <= 10000) + then + System.out.println("抠门公司准备给员工ID="+$employee.getId()+" ,名字= "+$employee.getName()+" 加薪"); + $employee.setSalary($employee.getSalary() + 100); + update($employee); + +end + +``` + +以上规则文件drl 可以看出 符合我们的规则构成 + +其中 : + +attributes 为以下内容 +```aidl + +agenda-group "name" +no-loop true + +``` + +LHS条件部分 + +当传入的员工对象 $employee 的工作年限year 大于等于3年 并且 工资salary小于等于10000时 触发我们的规则 + +```aidl +$employee:Employee(year >= 3 && salary <= 10000) +``` + + 
+RHS部分可以执行相应的动作 + +满足条件的员工可以增加薪水100 + +其中 `update($employee)` 的作用是更新工作内存中的数据,并让相关的规则重新匹配。 (要避免死循环) + +所以再attributes中 添加了 `no-loop true` + +如果员工的工资原来为8000 执行了这个规则 没有添加 `no-loop true` 那么会导致循环加薪直到 工资变成10100 + +这是老板不想看到的 + +```aidl + $employee.setSalary($employee.getSalary() + 100); + update($employee); +``` + +划重点: + + 规则引擎完全不会克隆对象,它仅仅是保存对对象的一个引用/指针 + 即,在规则定义中对fact的修改,就是对代码中fact对象的修改。 + 也即,规则的根本目的是产生一个供使用的输出结果,即修改后的JavaBean + + +### 学习资料 + +[完整代码传送门](https://github.com/xx13295/springboot-drools-redis) \ No newline at end of file diff --git a/elasticsearch/README.md b/elasticsearch/README.md new file mode 100644 index 0000000..a89a518 --- /dev/null +++ b/elasticsearch/README.md @@ -0,0 +1,147 @@ +# elasticsearch + + 1.官网 + https://www.elastic.co + + 2.Github + https://github.com/elastic/elasticsearch + + +# 安装 + + 首先你得有java-jdk,否则你是无法正常使用es的 + +>https://www.elastic.co/cn/downloads/elasticsearch + + 根据实际需求下载相应版本,本例子 使用elasticsearch-7.3.2-linux-x86_64.tar + + 解压安装就不在多说! + + 将解压后的 elasticsearch-7.3.2 放置于/usr/local/elasticsearch/ 目录下 + +1.创建用户 , elasticsearch不可使用root用户执行 + +>useradd -r elasticsearch + +2.授权 +> cd /usr/local/ + +>chown elasticsearch:elasticsearch -R elasticsearch + +3.启动 + +>cd /usr/local/elasticsearch/ elasticsearch-7.3.2/bin + +>su elasticsearch + +>./elasticsearch + + 虽然会提示 + future versions of Elasticsearch will require Java 11; + your Java version from [/usr/local/java/jdk1.8.0_131/jre] + does not meet this requirement + 但不用慌张,软件是向下兼容的 我们使用jdk8一样可以正常启动 + +4.检验是否成功启动 + +>curl http://127.0.0.1:9200/ + + 出现以下内容证明 es 已启动 + + { + "name": "localhost.localdomain", + "cluster_name": "elasticsearch", + "cluster_uuid": "0QKWfCxdTUislDnmmebV-w", + "version": { + "number": "7.3.2", + "build_flavor": "default", + "build_type": "tar", + "build_hash": "1c1faf1", + "build_date": "2019-09-06T14:40:30.409026Z", + "build_snapshot": false, + "lucene_version": "8.1.0", + "minimum_wire_compatibility_version": "6.8.0", + "minimum_index_compatibility_version": "6.0.0-beta1" + 
}, + "tagline": "You Know, for Search" + } + + + 但是 这个只能按照的机器本地访问, 如果想其他机器也访问这个 9200端口地址 + 则需要修改 配置文件 + +5.修改配置文件 + +>vi /usr/local/elasticsearch/elasticsearch-7.3.2/config/elasticsearch.yml + + 找到 'network.host: 你本地ip' + + 修改为 network.host: 0.0.0.0 + + 然后保存重启 + +6. 修改 network.host 无法正常启动问题 + + 根据错误提示 具体问题具体分析 + 可能会出现以下问题 + +[1]: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65535] + +[2]: max number of threads [1024] for user [elasticsearch] is too low, increase to at least [4096] + +[3]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144] + +[4]: system call filters failed to install; check the logs and fix your configuration or +disable system call filters at your own risk + +[5]: the default discovery settings are unsuitable for production use; at least one of +[discovery.seed_hosts, discovery.seed_providers, cluster.initial_master_nodes] must be configured + + 大致上 如下 增加配置,若还出现问题 百度一下 + 切换root用户 操作以下过程 + +>vi /etc/security/limits.d/90-nproc.conf + 将原来的1024 改为 4096 保存 + + * soft nproc 4096 + +>vi /etc/security/limits.conf + 在底下增加 4行 + + * soft nofile 65536 + + * hard nofile 65536 + + * soft nproc 4096 + + * hard nproc 4096 + +>vi /etc/sysctl.conf + 增加 vm.max_map_count配置 +vm.max_map_count=262144 + +最后 切换回 es用户 修改 elasticsearch.yml + + 增加 + + bootstrap.memory_lock: false + bootstrap.system_call_filter: false + cluster.initial_master_nodes: ["node-1"] + + 保存完事 + + + + + + + + + + + + + + + + \ No newline at end of file diff --git "a/elasticsearch/es\347\224\250\346\210\267\345\210\235\345\247\213\345\214\226.md" "b/elasticsearch/es\347\224\250\346\210\267\345\210\235\345\247\213\345\214\226.md" new file mode 100644 index 0000000..fc44e28 --- /dev/null +++ "b/elasticsearch/es\347\224\250\346\210\267\345\210\235\345\247\213\345\214\226.md" @@ -0,0 +1,23 @@ +# 开启用户鉴权 + +elasticsearch.yml + +``` +http.cors.enabled: true +http.cors.allow-origin: "*" +http.cors.allow-headers: Authorization 
+xpack.security.enabled: true +xpack.security.transport.ssl.enabled: true + +``` + + + +>./elasticsearch-setup-passwords interactive + +如果是docker 安装的就 进入 容器内部的bin目录 去操作 + +> docker exec -it elasticsearch /bin/bash + + +然后重启es 即可 diff --git a/exiftool/README.md b/exiftool/README.md new file mode 100644 index 0000000..9881c13 --- /dev/null +++ b/exiftool/README.md @@ -0,0 +1,90 @@ +# Exiftool介绍 + + 官网:https://exiftool.org/ + + ExifTool由Phil Harvey开发,是一款免费、跨平台的开源软件 + 用于读写和处理图像(主要)、音视频和PDF等文件的元数据(metadata) + ExifTool可以作为Perl库(Image::ExifTool)使用,也有功能齐全的命令行版本 + ExifTool支持很多类型的元数据,包括 + Exif、IPTC、XMP、JFIF、GeoTIFF、ICC配置文件、Photoshop IRB、FlashPix、AFCP和ID3 + 以及众多品牌的数码相机的私有格式的元数据。 + +### 什么是Exif + + Exif是可交换图像文件格式(Exchangeable image file format) + 是一种标准,定义了与数码相机捕获的图像(或其他媒体)有关的信息 + 用于存储重要的数据,比如相机的曝光、拍摄日期和时间,甚至GPS定位等。 + 在早期,摄影师需要随身携带笔记本来记录重要信息,如日期、快门速度、光圈等, + 这非常麻烦而且容易出错。 + 如今,每台数码相机都支持Exif, + 能够将拍摄时的很多参数通过这种格式(Exif)记录到照片中, + 这些照片(或其他类型的文件)中的额外数据就叫元数据(metadata), + 它由一系列参数组成,如快门速度、光圈、白平衡、相机品牌和型号、镜头、焦距等等。 + Exif信息可能会造成隐私泄露(相机型号、位置等), + 在社会工程学中,Exif也是获取目标信息的一种手段, + 所以建议在把照片上传到互联网之前先清理Exif数据。 + +### ExifTool使用示例 + + 几个常用的参数 + + -r:递归处理子目录 + -overwrite_original:不备份_original文件,直接覆盖 + -restore_original:恢复备份 + -delete_original:删除备份 + + 读取文件a.jpg的所有元数据 + exiftool a.jpg + + 写入标签artist、值ojbk到文件a.jpg(如果artist已存在将更新其值) + exiftool -artist=ojbk a.jpg + exiftool -artist=ojbk a.jpg b.jpg c.jpg(同时写入多个文件) + exiftool -artist=ojbk D:/images(写入目录中所有文件) + exiftool -artist=ojbk D:/images -r(递归处理子目录) + exiftool -artist="ojbk" -copyright="wxm" a.jpg(同时写入多个标签) + + 将创建时间、光圈、快门速度和ISO四项以列表形式保存为out.txt + exiftool -T -createdate -aperture -shutterspeed -iso DIR > out.txt + + 打印某照片的尺寸和曝光时间 + exiftool -s -ImageSize -ExposureTime a.jpg + + 递归扫描某目录所有照片,将共有的元数据写入相同文件名的txt文件中 + exiftool -r -w .txt -common DIR + + 生成image.raw的缩略图thumbnail.jpg + exiftool -b -ThumbnailImage image.raw > thumbnail.jpg + + 从a.jpg提取完整的xmp数据记录 + exiftool -xmp -b a.jpg> out.xmp + + 递归删除某目录下所有文件的全部元数据 + exiftool -all= -r DIR + 
+>摘录https://www.rmnof.com/article/exiftool-introduction/ + + + +### 使用 + + 有些相机会记录拍照时的GPS定位信息。如果你不希望别人看到使用该命令删除gps信息 + exiftool -gps:all= photo.jpg + + 删除所有信息 + exiftool -all= photo.jpg + + 删除EXIF以外的所有信息 + exiftool -all= --exif:all photo.jpg + + 这个命令显示指定文件的metadata的属性,当不能准确的获取exif信息 + mdls photo.jpg + + +## 乱码问题 + + exiftool -charset filename=utf8 -codedcharacterset=utf8 -v 你好.jpg + + + * https://exiftool.org/faq.html#Q10 + + * https://exiftool.org/exiftool_pod.html#WINDOWS-UNICODE-FILE-NAMES diff --git a/ffmpeg/Gif&Apng/convert/Apng2Gif/README.md b/ffmpeg/Gif&Apng/convert/Apng2Gif/README.md new file mode 100644 index 0000000..e331d16 --- /dev/null +++ b/ffmpeg/Gif&Apng/convert/Apng2Gif/README.md @@ -0,0 +1,19 @@ +# 使用方式: + + 设置ffmpeg环境变量 + 在Path 中加入 + D:\ffmpeg\bin; + 直接将png文件/文件夹 拖进 .bat 文件里 就可以进行转换 + + +### 注意事项: + +如果 将图片 拉入 .bat 文件中后 显示乱码 没有正确 执行 一般情况下是 +win10 命令提示符:mode不是内部或外部命令 +其实就是没有配置环境变量 + +在Path 中加入 + +> %SystemRoot%\system32;%SystemRoot%;%SystemRoot%\System32\Wbem; + +有的时候 会卡在正在处理 按回车健试试 \ No newline at end of file diff --git a/ffmpeg/Gif&Apng/convert/Apng2Gif/apng/test.png b/ffmpeg/Gif&Apng/convert/Apng2Gif/apng/test.png new file mode 100644 index 0000000..ab15af5 Binary files /dev/null and b/ffmpeg/Gif&Apng/convert/Apng2Gif/apng/test.png differ diff --git a/ffmpeg/Gif&Apng/convert/Apng2Gif/apng2gif.bat b/ffmpeg/Gif&Apng/convert/Apng2Gif/apng2gif.bat new file mode 100644 index 0000000..b786615 --- /dev/null +++ b/ffmpeg/Gif&Apng/convert/Apng2Gif/apng2gif.bat @@ -0,0 +1,42 @@ +@mode con cols=63 lines=40 +@echo off +chcp 65001 >nul +title APNG转GIF工具v1.0 By 王小明 + +set "ffmpeg=ffmpeg" +set /A whitebgd = 1 +set "PATH=%PATH%;"%~dp0"" + +if "%~1"=="" ( + cd /D "%~dp0" +) else ( + cd /D "%~f1" +) +echo. +mkdir temp >nul 2>nul +for /F %%i in ('dir /b^|findstr .png$') do ( + echo 【正在处理 %%i 】 + echo. 
>nul + if %whitebgd%==1 ( + REM 加白底输出帧。来自 esterTion + %ffmpeg% -hide_banner -loglevel quiet -i "%%i" "temp\%%~ni_%%3d.png" -f image2 -filter_complex "pad=iw*2:ih:iw:ih:white,crop=iw/2:ih:0:0[back];[back][0]overlay=0:0" >nul 2>nul + ) else ( + REM 保留通明输出帧。 + + REM 不推荐使用。因为 gif 不支持半透明,只有透明和不透明,输出的gif将可能会有白边。 + %ffmpeg% -hide_banner -loglevel quiet -i "%%i" "temp\%%~ni_%%3d.png" -f image2 >nul 2>nul + ) + + cd temp + REM 生成调色板。参考 https://gist.github.com/gka/148bbad67871fa6ca8d0b97e4eee94b5 + %ffmpeg% -i "%%~ni_%%3d.png" -vf palettegen "%%~ni_palette.png" >nul 2>nul + REM 生成GIF + %ffmpeg% -v warning -i "%%~ni_%%3d.png" -i "%%~ni_palette.png" -lavfi "paletteuse,setpts=3*PTS" -loop 0 -y "..\%%~ni.gif" >nul 2>nul + cd .. + echo 【%%i 处理完成】 + echo. + ) +rd /s /q temp +echo 【全部文件处理完成,按任意键退出】 +pause >nul +exit \ No newline at end of file diff --git "a/ffmpeg/Gif&Apng/convert/Apng2Gif\344\275\277\347\224\250\346\225\231\347\250\213.gif" "b/ffmpeg/Gif&Apng/convert/Apng2Gif\344\275\277\347\224\250\346\225\231\347\250\213.gif" new file mode 100644 index 0000000..f99d550 Binary files /dev/null and "b/ffmpeg/Gif&Apng/convert/Apng2Gif\344\275\277\347\224\250\346\225\231\347\250\213.gif" differ diff --git "a/ffmpeg/Gif&Apng/convert/GIF2Apng\344\275\277\347\224\250\346\225\231\347\250\213.gif" "b/ffmpeg/Gif&Apng/convert/GIF2Apng\344\275\277\347\224\250\346\225\231\347\250\213.gif" new file mode 100644 index 0000000..13c3a1a Binary files /dev/null and "b/ffmpeg/Gif&Apng/convert/GIF2Apng\344\275\277\347\224\250\346\225\231\347\250\213.gif" differ diff --git a/ffmpeg/Gif&Apng/convert/Gif2Apng/Gif/1.gif b/ffmpeg/Gif&Apng/convert/Gif2Apng/Gif/1.gif new file mode 100644 index 0000000..3b76181 Binary files /dev/null and b/ffmpeg/Gif&Apng/convert/Gif2Apng/Gif/1.gif differ diff --git a/ffmpeg/Gif&Apng/convert/Gif2Apng/Gif/2.gif b/ffmpeg/Gif&Apng/convert/Gif2Apng/Gif/2.gif new file mode 100644 index 0000000..0e4f20b Binary files /dev/null and b/ffmpeg/Gif&Apng/convert/Gif2Apng/Gif/2.gif differ 
diff --git a/ffmpeg/Gif&Apng/convert/Gif2Apng/README.md b/ffmpeg/Gif&Apng/convert/Gif2Apng/README.md new file mode 100644 index 0000000..c93d6e4 --- /dev/null +++ b/ffmpeg/Gif&Apng/convert/Gif2Apng/README.md @@ -0,0 +1,21 @@ +# 使用方式: + + 设置ffmpeg环境变量 + 在Path 中加入 + D:\ffmpeg\bin; + 直接将gif文件/文件夹 拖进 .bat 文件里 就可以进行转换 + + 推荐使用 HD版(基本不压缩画质) > 轻微压缩版 > 狠一点压缩版 > 极致压缩版(不推荐使用) + + +### 注意事项: + +如果 将图片 拉入 .bat 文件中后 显示乱码 没有正确 执行 一般情况下是 +win10 命令提示符:mode不是内部或外部命令 +其实就是没有配置环境变量 + +在Path 中加入 + +>%SystemRoot%\system32;%SystemRoot%;%SystemRoot%\System32\Wbem; + +有的时候 会卡在正在处理 按回车健试试 \ No newline at end of file diff --git "a/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-HD\347\211\210.bat" "b/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-HD\347\211\210.bat" new file mode 100644 index 0000000..650ed5e --- /dev/null +++ "b/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-HD\347\211\210.bat" @@ -0,0 +1,38 @@ +@mode con cols=63 lines=40 +@echo off +chcp 65001 >nul +title GIF转工具APNG v1.1 By 王小明 + +set "ffmpeg=ffmpeg" +set /A whitebgd = 1 +set "PATH=%PATH%;"%~dp0"" +if "%~1"=="" ( + + cd /D "%~dp0" +) else ( + cd /D "%~f1" +) +echo. +mkdir temp >nul 2>nul +for /F %%i in ('dir /b^|findstr .gif$') do ( + echo 【正在处理 %%i 】 + echo. >nul + if %whitebgd%==1 ( + copy "%%i" "temp\%%~ni_%%3d.gif" >nul 2>nul + ) else ( + %ffmpeg% -hide_banner -loglevel quiet -i "%%i" "temp\%%~ni_%%3d.gif" -f image2 >nul 2>nul + ) + + cd temp + REM 生成APNG + %ffmpeg% -v warning -i "%%~ni_%%3d.gif" -plays 0 -vf "setpts=PTS-STARTPTS" -y "..\%%~ni.apng" >nul 2>nul + echo 【%%i 转换完成】 + cd .. + ren "%%~ni.apng" "%%~ni.png" >nul 2>nul + echo 【%%~ni.apng to %%~ni.png 重命名完成】 + echo. 
+ ) +rd /s /q temp +echo 【全部文件处理完成,按任意键退出】 +pause >nul +exit \ No newline at end of file diff --git "a/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-\346\236\201\350\207\264\345\216\213\347\274\251\347\211\210\344\270\215\346\216\250\350\215\220\344\275\277\347\224\250.bat" "b/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-\346\236\201\350\207\264\345\216\213\347\274\251\347\211\210\344\270\215\346\216\250\350\215\220\344\275\277\347\224\250.bat" new file mode 100644 index 0000000..49eac57 --- /dev/null +++ "b/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-\346\236\201\350\207\264\345\216\213\347\274\251\347\211\210\344\270\215\346\216\250\350\215\220\344\275\277\347\224\250.bat" @@ -0,0 +1,42 @@ +@mode con cols=63 lines=40 +@echo off +chcp 65001 >nul +title GIF转工具APNG v1.0 By 王小明 + +set "ffmpeg=ffmpeg" +set /A whitebgd = 1 +set "PATH=%PATH%;"%~dp0"" + +if "%~1"=="" ( + cd /D "%~dp0" +) else ( + cd /D "%~f1" +) +echo. +mkdir temp >nul 2>nul +for /F %%i in ('dir /b^|findstr .gif$') do ( + echo 【正在处理 %%i 】 + echo. >nul + if %whitebgd%==1 ( + REM 加白底输出帧。来自 esterTion + %ffmpeg% -hide_banner -loglevel quiet -i "%%i" "temp\%%~ni_%%3d.gif" -f image2 -filter_complex "pad=iw*2:ih:iw:ih:white,crop=iw/2:ih:0:0[back];[back][0]overlay=0:0" >nul 2>nul + ) else ( + REM 保留通明输出帧。 + %ffmpeg% -hide_banner -loglevel quiet -i "%%i" "temp\%%~ni_%%3d.gif" -f image2 >nul 2>nul + ) + + cd temp + REM 生成调色板。参考 https://gist.github.com/gka/148bbad67871fa6ca8d0b97e4eee94b5 + %ffmpeg% -i "%%~ni_%%3d.gif" -vf palettegen "%%~ni_palette.gif" >nul 2>nul + REM 生成GIF + %ffmpeg% -v warning -i "%%~ni_%%3d.gif" -i "%%~ni_palette.gif" -plays 0 -vf "setpts=PTS-STARTPTS" -y "..\%%~ni.apng" >nul 2>nul + echo 【%%i 转换完成】 + cd .. + ren "%%~ni.apng" "%%~ni.png" >nul 2>nul + echo 【%%~ni.apng to %%~ni.png 重命名完成】 + echo. 
+ ) +rd /s /q temp +echo 【全部文件处理完成,按任意键退出】 +pause >nul +exit \ No newline at end of file diff --git "a/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-\347\213\240\344\270\200\347\202\271\345\216\213\347\274\251\347\211\210.bat" "b/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-\347\213\240\344\270\200\347\202\271\345\216\213\347\274\251\347\211\210.bat" new file mode 100644 index 0000000..ee9902b --- /dev/null +++ "b/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-\347\213\240\344\270\200\347\202\271\345\216\213\347\274\251\347\211\210.bat" @@ -0,0 +1,37 @@ +@mode con cols=63 lines=40 +@echo off +chcp 65001 >nul +title GIF转工具APNG v1.1 By 王小明 + +set "ffmpeg=ffmpeg" +set /A whitebgd = 1 +set "PATH=%PATH%;"%~dp0"" +if "%~1"=="" ( + + cd /D "%~dp0" +) else ( + cd /D "%~f1" +) +echo. +mkdir temp >nul 2>nul +for /F %%i in ('dir /b^|findstr .gif$') do ( + echo 【正在处理 %%i 】 + echo. >nul + if %whitebgd%==1 ( + copy "%%i" "temp\%%~ni_%%3d.gif" >nul 2>nul + ) else ( + %ffmpeg% -hide_banner -loglevel quiet -i "%%i" "temp\%%~ni_%%3d.gif" -f image2 >nul 2>nul + ) + cd temp + REM 生成APNG + %ffmpeg% -v warning -i "%%~ni_%%3d.gif" -plays 0 -vf "setpts=PTS-STARTPTS,fps=fps=15" -y "..\%%~ni.apng" >nul 2>nul + echo 【%%i 转换完成】 + cd .. + ren "%%~ni.apng" "%%~ni.png" >nul 2>nul + echo 【%%~ni.apng to %%~ni.png 重命名完成】 + echo. 
+ ) +rd /s /q temp +echo 【全部文件处理完成,按任意键退出】 +pause >nul +exit \ No newline at end of file diff --git "a/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-\350\275\273\345\276\256\345\216\213\347\274\251\347\211\210.bat" "b/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-\350\275\273\345\276\256\345\216\213\347\274\251\347\211\210.bat" new file mode 100644 index 0000000..60e0cd0 --- /dev/null +++ "b/ffmpeg/Gif&Apng/convert/Gif2Apng/gif2png-\350\275\273\345\276\256\345\216\213\347\274\251\347\211\210.bat" @@ -0,0 +1,37 @@ +@mode con cols=63 lines=40 +@echo off +chcp 65001 >nul +title GIF转工具APNG v1.1 By 王小明 + +set "ffmpeg=ffmpeg" +set /A whitebgd = 1 +set "PATH=%PATH%;"%~dp0"" +if "%~1"=="" ( + + cd /D "%~dp0" +) else ( + cd /D "%~f1" +) +echo. +mkdir temp >nul 2>nul +for /F %%i in ('dir /b^|findstr .gif$') do ( + echo 【正在处理 %%i 】 + echo. >nul + if %whitebgd%==1 ( + copy "%%i" "temp\%%~ni_%%3d.gif" >nul 2>nul + ) else ( + %ffmpeg% -hide_banner -loglevel quiet -i "%%i" "temp\%%~ni_%%3d.gif" -f image2 >nul 2>nul + ) + cd temp + REM 生成APNG + %ffmpeg% -v warning -i "%%~ni_%%3d.gif" -plays 0 -vf "setpts=PTS-STARTPTS,fps=fps=20" -y "..\%%~ni.apng" >nul 2>nul + echo 【%%i 转换完成】 + cd .. + ren "%%~ni.apng" "%%~ni.png" >nul 2>nul + echo 【%%~ni.apng to %%~ni.png 重命名完成】 + echo. + ) +rd /s /q temp +echo 【全部文件处理完成,按任意键退出】 +pause >nul +exit \ No newline at end of file diff --git "a/ffmpeg/Gif&Apng/gif\350\275\254apng.md" "b/ffmpeg/Gif&Apng/gif\350\275\254apng.md" new file mode 100644 index 0000000..088105e --- /dev/null +++ "b/ffmpeg/Gif&Apng/gif\350\275\254apng.md" @@ -0,0 +1,21 @@ +# Apng + + apng格式诞生于2004年,是基于png格式的动画格式图片, + 它的动图后缀依然是.png,apng格式相对GIF格式有更多的优势, + 在色彩方面它完美支持1600万种颜色, + 对于渐变透明的元素,有着非常优秀的成像效果. 
+ + 预览apng动图需要拖拽到浏览器中才可以查看动画, + 所以传播性没有GIF图强。 + + +# gif 2 apng + +> ffmpeg.exe -y -i "L02.gif" -plays 0 -vf "setpts=PTS-STARTPTS" 232.apng + + + +>ffmpeg.exe -y -i "GIF.gif" -plays 0 -vf "setpts=PTS-STARTPTS, hqdn3d=1.5:1.5:6:6, scale=300:300" -r 20 232.apng + + + 转完改下后缀 png 完事 diff --git a/ffmpeg/README.md b/ffmpeg/README.md new file mode 100644 index 0000000..ac2569d --- /dev/null +++ b/ffmpeg/README.md @@ -0,0 +1,138 @@ +# ffmpeg +### 这是个开源的软件 可以帮助我们对音频视频的文件进行各种转换切割操作 + + 以下 示例仅提供 mp3比特率转换demo + + 更多 操作 请 使用 度娘 + + +---------------------------- + 食用方法 linux +---------------------------- + +### fmmpeg 下载地址 +### https://ffmpeg.org/releases/ffmpeg-4.0.tar.bz2 + 本教程使用上诉地址作为示范 为当前最新版本 +### 官方下载地址 http://ffmpeg.org/download.html + +### lame (mp3编码器) 下载地址 https://jaist.dl.sourceforge.net/project/lame/lame/3.100/lame-3.100.tar.gz + +### 官方地址历史版本 https://sourceforge.net/projects/lame/files/lame/ + +### 还得准备一下 yasm http://www.tortall.net/projects/yasm/releases/yasm-1.3.0.tar.gz + + 使用 rz 上传 ffmpeg-4.0.tar.bz2 3.100/lame-3.100.tar.gz yasm-1.3.0.tar.gz + //如果没安装 就安装一下yum install lrzsz + //rz 上传 sz 下载 还蛮方便 + + //注意 一定要 1.3.0 版本的。 1.2.0的版本过低了 貌似 ffmpeg 3.4以后的版本就已经不支持啦 + //编译的时候会提示yasm版本过低 + + +### 依次解压编译安装 + +#### yasm 安装 + tar -zxvf yasm-1.3.0.tar.gz + cd yasm-1.3.0 + ./configure + make + sudo make install + +#### mp3编码器 安装 + + tar -zxvf lame-3.100.tar.gz + cd lame-3.100 + ./configure + make + sudo make install + +#### ffmpeg 安装 + + bzip2 -d ffmpeg-4.0.tar.bz2 + tar xvf ffmpeg-4.0.tar + + cd ffmpeg-4.0/ + ./configure --enable-libmp3lame + make  + sudo make install + +#### 在 ~/.bashrc 或者 ~/.bash_profile 中加入环境变量 + + cd + vi .bash_profile + + 加入 export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib + + Esc :wq + + //生效环境变量 + source .bash_profile + + //让动态链接库为系统所共享 + sudo ldconfig + +### 这时候 我们输入 ffmpeg 可以查看当前版本 以及使用的编码器 + + [ojbk@VM_0_11_centos ~]$ ffmpeg + ffmpeg version 4.0 Copyright (c) 2000-2018 the FFmpeg developers + built with gcc 4.8.5 (GCC) 20150623 (Red Hat 
4.8.5-28) + configuration: --enable-libmp3lame + libavutil 56. 14.100 / 56. 14.100 + libavcodec 58. 18.100 / 58. 18.100 + libavformat 58. 12.100 / 58. 12.100 + libavdevice 58. 3.100 / 58. 3.100 + libavfilter 7. 16.100 / 7. 16.100 + libswscale 5. 1.100 / 5. 1.100 + libswresample 3. 1.100 / 3. 1.100 + Hyper fast Audio and Video encoder + usage: ffmpeg [options] [[infile options] -i infile]... {[outfile options] outfile}... + + Use -h to get full help or, even better, run 'man ffmpeg' + +### 基本使用 + + 随便 上传一个 音乐 文件 使用 ffmpeg -i 文件名 可以查看 该音乐的详细信息 + + ffmpeg -i 'Waiting For Love.mp3' + + + 转换 这个音乐文件比特率为 320 我现在想转换成 128 + ffmpeg -y -i 'Waiting For Love.mp3' -map 0:0 -acodec libmp3lame -ab 128k -f mp3 newFile.mp3 + + + + 比特率 降低了 文件大小自然就 变小了。 原本 10M 现在 只有 4M + + + 只需要安装更多的 编码器 就能支持更多 转换操作 + + + 安装新的 解码器后 从新安装下 ffmpeg 即在 ./configure后加入--enable-[插件] + 例如: + +  ./configure --enable-libmp3lame --enable-libvorbis --enable-gpl --enable-version3 --enable-nonfree --enable-pthreads --enable-libfaac --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libx264 --enable-libxvid --enable-postproc --enable-ffserver --enable-ffplay + make  + sudo  make install  + +---------------------------- + 食用方法 windows +---------------------------- + + + 下载window版 直接解压 使用命令就完事 了哦 + + win + R + + cmd + + D: + + cd D:\ffmpeg\bin + + ffmpeg + + 可以看出 windows 版的内置很多的编码器了直接用 就完事了。 + +![image](https://github.com/xx13295/wxm/blob/master/images/ffmpeg/ffmpeg1.png?raw=true) + + \ No newline at end of file diff --git "a/ffmpeg/ffmpeg\350\277\233\351\230\266\347\257\207.md" "b/ffmpeg/ffmpeg\350\277\233\351\230\266\347\257\207.md" new file mode 100644 index 0000000..57de231 --- /dev/null +++ "b/ffmpeg/ffmpeg\350\277\233\351\230\266\347\257\207.md" @@ -0,0 +1,75 @@ +#使用 FFmpeg 进行视频转码 + + 比如 我们要讲mp4的视频换成 mkv 格式的 -i 参数是 转换文件必须的 + 可以使用命令 ffmpeg -i 【输入文件】 【输出文件】 + + 加入 -y 参数就是如果 ojbk.mkv已经存在会覆盖掉,不然他会提示你文件以及存在了你需要手动输入y才能继续执行 + + ffmpeg -y -i d:/ojbk.mp4 d:/ojbk.mkv + + 发现 转换完成后 视频 丢失 了音频源 因为 没有指定音频编码 mp4默认使用 
aac音频编码格式 那么我们 也给 输出文件指定这个编码 + 【这里的-c:a 是 -acodec的简写 】 + ffmpeg -y -i d:/ojbk.mp4 -c:a aac d:/ojbk.mkv + + 发现 执行失败 报出错误 + The encoder 'aac' is experimental but experimental codecs + are not enabled, add '-strict -2' if you want to use it. + + 提示已经很明显了 缺少参数 我们 增加 '-strict -2' 再试一次 + + ffmpeg -y -i d:/ojbk.mp4 -c:a aac -strict -2 d:/ojbk.mkv + + 转换完成后打开视频 听到了 久违的声音。 + + 还可以指定 视频编码 -c:v hevc 这时候 这个视频转换就会使用 hevc编码 。【这里的-c:v 是 -vcodec的简写 】 + + ffmpeg -y -i d:/ojbk.mp4 -c:v hevc -c:a aac -strict -2 d:/ojbk.mkv + +### 将视频尺寸转换为 `640x360` 由于视频的尺寸发生了改变因此对视频进行重新编码,但音频不用。 + + ffmpeg -y -i d:/ojbk.mp4 -c:v h264 -c:a copy -s 640x360 d:/newojbk.mp4 + +### 单纯的转换格式 可以 直接使用 -c copy + + ffmpeg -y -i d:\ojbk.mp4 -c copy d:\ojbk.mkv + + +### 把视频的 前10s去掉 + ffmpeg -y -i d:\ojbk.mp4 -c copy -ss 10 d:\newojbk.mp4 + +### 截取一段视频 从这个视频的 3分25秒开始 到 4分 25秒 也就是 一分钟的视频 -ss 表示 开始时间 -t表示 持续时间 + + ffmpeg -y -i d:\ojbk.mp4 -c copy -ss 03:25 -t 1:00 d:\newojbk.mp4 + + +## 能分割就能应该能组合 --> 将两段小视频组合 + 首先在 D盘 创建 一个 ojbk.txt + + 在ojbk.txt中写入 + + file newojbk1.mp4 + file newojbk2.mp4 + + 保存 + + 执行以下命令 不出意外 就是 按照你写的顺序开始拼接的一段新视频 newojbk3 + ffmpeg -f concat -i d:/ojbk.txt -c copy d:/newojbk3.mp4 + +## 视频提取音频流视频流 除去视频中带英文字母广告信息等 + + ffmpeg -y -i Parasite.2019.KOREAN.1080p.WEBRip.x264.AAC2.0-NOGRP.mkv -map 0:0 -map 0:1 -c:v h264 -c:a copy jishengchong.mp4 + + 加上自己的中文字幕 + + ffmpeg -y -i jishengchong.mp4 -vf subtitles=PARASITE.2019.720p.HDRip.H264.AAC-NonDRM.ass jishengc.mp4 + + 服务器后台运行 就使用 + nohup ffmpeg {xxx省略xxx} 1>ffmpeg.log 2>&1 & + + + 注意 服务器 字体问题,可能导致 合成后 是框框口口口口。 + 这时候需要查看 ffmpeg 执行过程中的日志 查看是缺少什么字体文件,可以 上传相应的字体 + 到/usr/share/fonts/dejavu + 再 + fc-cache -fv + \ No newline at end of file diff --git "a/ffmpeg/webp\345\233\276\347\211\207\345\210\266\344\275\234.md" "b/ffmpeg/webp\345\233\276\347\211\207\345\210\266\344\275\234.md" new file mode 100644 index 0000000..667f806 --- /dev/null +++ "b/ffmpeg/webp\345\233\276\347\211\207\345\210\266\344\275\234.md" @@ -0,0 +1,73 @@ +# Webp 图片格式 + + 说起这个webp 我8012年 
也是第一次听说,我见不多识不广,但我会百度。 + + WebP是Google推出的影像技术,它可让网页图档有效进行压缩, + + 同时又不影响图片格式兼容与实际清晰度,进而让整体网页下载速度加快。 + + 总之 ,牛逼~ + + (Ie 浏览器现在不支持的~) + + +# 使用 ffmpeg 获取webp 格式图片 + + 动图静图格式都是webp + +## 举个栗子 + + 在windows环境下 解压就能用 + + linux 的话 ffmpeg 在编译时需要加 --enable-libwebp + + ffmpeg 编码时两个参数说明 + -lossless 无损压缩规范。默认值:0(1是可逆压缩) + -qscale [0~100]关系到图片的质量。默认值为75。 + + 以下将一张图 1.jpg 转换为 webp 的ffmpeg 命令 + +>ffmpeg -y -i 1.jpg -vcodec libwebp -qscale 90 1.webp + + 上面的是有损压缩 ,下面是 无损压缩 + +>ffmpeg -y -i 1.jpg -vcodec libwebp -lossless 1 2.webp + + +## 调整大小 + + 如果需要调整大小 需要 增加参数 + + -vf scale=w:h + + 其中w是宽度,h是长度。默认都是-1,就是不做修改。 + + 等比调整 只需要修改其中一个为具体的值 另一个为-1即可。 + 自定义比例当然是 自己设定 w 和 h 的值啦~ + 还可以设置成原图尺寸的一个计算公式,比如-vf scale=iw/2:ih/2。 + +## 视频转webp + + -ss 02:30 -t 10 【 截取这个 mp4从2分30s开始 截10s】 + + -loop 0 【代表 循环播放】 + + preset + 预置:默认为default + none + 不使用任何预设 + default + 自动指定 + picture + 肖像照片 + photo + 风景照片 + drawing + 绘制 + icon + 多彩与小尺寸 + text + 文本 字符居中 + + +>ffmpeg -y -i ojbk.mp4 -vcodec libwebp -qscale 60 -ss 02:30 -t 10 -preset default -loop 0 -vf scale=640:-1,fps=15 -an -vsync 0 ojbk.webp diff --git a/ffmpeg/x264.md b/ffmpeg/x264.md new file mode 100644 index 0000000..8394f82 --- /dev/null +++ b/ffmpeg/x264.md @@ -0,0 +1,11 @@ +## X264 + +>wget http://download.videolan.org/pub/x264/snapshots/last_x264.tar.bz2 +>tar xjvf last_x264.tar.bz2 +>cd x264-snapshot* +>./configure --enable-shared --disable-asm +>make +>sudo make install + + +#### --enable-libvpx --enable-libx264 diff --git "a/ffmpeg/yum\345\256\211\350\243\205ffmpeg.md" "b/ffmpeg/yum\345\256\211\350\243\205ffmpeg.md" new file mode 100644 index 0000000..f9be039 --- /dev/null +++ "b/ffmpeg/yum\345\256\211\350\243\205ffmpeg.md" @@ -0,0 +1,39 @@ +# yum 安装更加方便 无需 自己安装各种编解码插件 + +### 1.升级系统 + + sudo yum install epel-release -y + sudo yum update -y + +### 2.安装Nux Dextop Yum 源 + + 由于CentOS没有官方FFmpeg rpm软件包。但是,我们可以使用第三方YUM源(Nux Dextop)完成此工作。 http://li.nux.ro + +>sudo rpm --import http://li.nux.ro/download/nux/RPM-GPG-KEY-nux.ro + + CentOS 7 + +>sudo rpm 
-Uvh http://li.nux.ro/download/nux/dextop/el7/x86_64/nux-dextop-release-0-5.el7.nux.noarch.rpm + + CentOS 6 + +>sudo rpm -Uvh http://li.nux.ro/download/nux/dextop/el6/x86_64/nux-dextop-release-0-2.el6.nux.noarch.rpm + +### 3.安装FFmpeg 和 FFmpeg开发包 + +>sudo yum install ffmpeg ffmpeg-devel -y + +### 4.测试是否安装成功 + +>ffmpeg + +若出现 一大溜 说明安装成功 + +若出现-bash: ffmpeg: command not found 说安装失败了 + + + +### 5. 如果出现安装失败 + +>sudo yum clean + 然后重复上述步骤完事 \ No newline at end of file diff --git "a/ffmpeg/\347\274\226\350\257\221\345\256\211\350\243\205\351\227\256\351\242\230.md" "b/ffmpeg/\347\274\226\350\257\221\345\256\211\350\243\205\351\227\256\351\242\230.md" new file mode 100644 index 0000000..1d5f361 --- /dev/null +++ "b/ffmpeg/\347\274\226\350\257\221\345\256\211\350\243\205\351\227\256\351\242\230.md" @@ -0,0 +1,39 @@ +##在ffmpeg版本为4.0 增加libx265 时遇到找不到pkg-config + + ERROR: x265 not found using pkg-config + + +##解决办法 在configure前 export PKG_CONFIG_PATH + + +>export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:$PKG_CONFIG_PATH + +>./configure --enable-libmp3lame --enable-gpl --enable-libx265 +### ubuntu 使用这个解决上面的问题 +>sudo apt-get install pkg-config + + +##编译安装 libx265 + +下载地址 https://bitbucket.org/multicoreware/x265/downloads/ + +>tar zxvf x265_2.5.tar.gz + +>cd x265_2.5/build/linux + +>./make-Makefiles.bash + +>make + +>sudo make install + + +如果执行 ./make-Makefiles.bash 遇到 需要配置的情况 具体情况具体分析,我装2.8时候 直接 先按 c 再按 q 然后make 、 sudo make install + +###ubuntu 系统 apt-get Centos 是yum +如果出现 cmake: command not found +>sudo apt-get install cmake + +如果出现 ccmake: command not found +>sudo apt install cmake-curses-gui + diff --git "a/ffmpeg/\350\247\206\351\242\221\345\216\273\351\231\244\346\260\264\345\215\260.md" "b/ffmpeg/\350\247\206\351\242\221\345\216\273\351\231\244\346\260\264\345\215\260.md" new file mode 100644 index 0000000..b47c2dd --- /dev/null +++ "b/ffmpeg/\350\247\206\351\242\221\345\216\273\351\231\244\346\260\264\345\215\260.md" @@ -0,0 +1,66 @@ +# 使用fmmpeg 去除水印 + + 注意水印的添加其实已经破坏了视频, 
就算去除了 也会留下疤痕。 + + +# 去除效果对比 + +![image](https://github.com/xx13295/wxm/blob/master/images/ffmpeg/%E5%8E%BB%E9%99%A4%E6%B0%B4%E5%8D%B01.png?raw=true) + + +![image](https://github.com/xx13295/wxm/blob/master/images/ffmpeg/%E5%8E%BB%E9%99%A4%E6%B0%B4%E5%8D%B02.png?raw=true) + + +# 去除水印 + +1.首先截取一张带有水印的视频的图片放入 画图中工具中 + +![image](https://github.com/xx13295/wxm/blob/master/images/ffmpeg/%E5%8E%BB%E9%99%A4%E6%B0%B4%E5%8D%B03.png?raw=true) + + + 可以注意到 下方有 坐标 1759, 80 + + 也就是 x= 1759 y=80 是你鼠标所指的位置 + + 通过鼠标滑动可以测量出 ‘逼乎’ 这个logo 的长度 也就是150像素左右 高度大概是80 + + 这时候就要确定视频的分辨率了。 然后再去 等比缩放 + + +输入以下命令行 + +>ffmpeg.exe -i bihu.mp4 + +![image](https://github.com/xx13295/wxm/blob/master/images/ffmpeg/%E5%8E%BB%E9%99%A4%E6%B0%B4%E5%8D%B04.png?raw=true) + + 可以看出 视频的分辨率为 848x478 + + Stream #0:0 为 视频流 + + Stream #0:1 为音频流 + + 当然这里我们只需要关心分辨率就够了 + + 这时候我们使用 上述的 视频分辨率 计算一下 logo 的大概位置 + +![image](https://github.com/xx13295/wxm/blob/master/images/ffmpeg/%E5%8E%BB%E9%99%A4%E6%B0%B4%E5%8D%B05.png?raw=true) + + 将鼠标移动到 上述位置 获得坐标 1700, 25 + + 我屏幕的分辨率 1920 * 1080 + + 通过计算 获得 坐标 应该是 750,25 + + 注意 当前参数 x+w 不能大于 848也就是视频的分辨率 因此先随便设置以下参数 将show置为1 等视频 处理完毕可以 看到 方框 再进行相应调整 + +>ffmpeg.exe -y -i bihu.mp4 -vf "delogo=x=750:y=25:w=90:h=50:show=1" -c:a copy 1.mp4 + + 处理完发现 我曹。几乎完美 + +![image](https://github.com/xx13295/wxm/blob/master/images/ffmpeg/%E5%8E%BB%E9%99%A4%E6%B0%B4%E5%8D%B06.png?raw=true) + + 只需要将 原来的show 1 改为0即可 + + +>ffmpeg.exe -y -i bihu.mp4 -vf "delogo=x=750:y=25:w=90:h=50:show=0" -c:a copy 2.mp4 + diff --git "a/frp/FRP\345\272\224\347\224\250\345\237\272\346\234\254\344\275\277\347\224\250.md" "b/frp/FRP\345\272\224\347\224\250\345\237\272\346\234\254\344\275\277\347\224\250.md" new file mode 100644 index 0000000..d9e7941 --- /dev/null +++ "b/frp/FRP\345\272\224\347\224\250\345\237\272\346\234\254\344\275\277\347\224\250.md" @@ -0,0 +1,108 @@ +# frp 应用的基本使用 + + 软件下载 https://github.com/fatedier/frp/releases + + 这里我顺便选了一个 包frp_0.27.0_linux_amd64.tar.gz + +## 公网服务器 + + 下载frp软件 + +>wget 
https://github.com/fatedier/frp/releases/download/v0.27.0/frp_0.27.0_linux_amd64.tar.gz + + 解压 + +>tar zxvf frp_0.27.0_linux_amd64.tar.gz + + 强迫症重命名一下 + +>mv frp_0.27.0_linux_amd64 frp-0.27.0 + + 进入文件夹 + +>cd frp-0.27.0 + + 编辑 frps.ini文件 + +>vi frps.ini + + [common] + bind_port = 7000 + + #log + log_file = ./frps.log + # debug, info, warn, error + log_level = info + log_max_days = 3 + #token 务必要写的复杂一点防止被别人随便连接 + token = ojbk.frp.ssh + +保存退出(ESC : wq) + + 启动 frps: + +>./frps -c ./frps.ini + + +# 内网服务器 + +### 前面的步骤和 上面的一样 + + 下载frp软件 + +>wget https://github.com/fatedier/frp/releases/download/v0.27.0/frp_0.27.0_linux_amd64.tar.gz + + 解压 + +>tar zxvf frp_0.27.0_linux_amd64.tar.gz + + 强迫症重命名一下 + +>mv frp_0.27.0_linux_amd64 frp-0.27.0 + + 进入文件夹 + +>cd frp-0.27.0 + +### 开始不一样了 + + 由于是客户端嘛所以就是 修改 frpc.ini文件 + + 下面的 x.x.x.x 为你上面安装frp服务端机器的公网ip地址 + + 客户端的 [common] 中的 port 、token 要与 服务端的一致 + + +>vi frpc.ini + + [common] + server_addr = x.x.x.x + server_port = 7000 + token = ojbk.frp.ssh + + [ssh] + type = tcp + local_ip = 127.0.0.1 + local_port = 22 + remote_port = 6000 + +保存退出(ESC : wq) + + 启动 frpc: + +>./frpc -c ./frpc.ini + +# 通过 ssh 访问内网机器 ssh -p 端口 用户@地址 + + 这里我登录另一台腾讯水管机 + + +ssh -p 6000 wxm@x.x.x.x + + 发现成功 ssh 通过公网ip地址 连接上了内网服务器 + + 这样就能在家办公了,手动狗头.jpg + +更多功能》》 https://github.com/fatedier/frp/blob/master/README_zh.md + + diff --git a/frp/README.md b/frp/README.md new file mode 100644 index 0000000..8c436d8 --- /dev/null +++ b/frp/README.md @@ -0,0 +1,34 @@ +# 什么是FRP? + + frp全名Fast Reverse Proxy, 是一个可用于内网穿透的高性能的反向代理应用 + 支持 tcp、 udp 协议,同时也为 http 和 https 应用协议提供了额外的能力, + 主要用于解决一些内网服务没有公网ip但是却需要提供外网访问的问题。 + +[正反向代理](https://github.com/xx13295/MD-Note/blob/master/frp/%E6%AD%A3%E5%90%91%E4%BB%A3%E7%90%86%E4%B8%8E%E5%8F%8D%E5%90%91%E4%BB%A3%E7%90%86.md/) + +![](https://raw.githubusercontent.com/xx13295/MD-Note/master/frp/img/frpjg.png) + + + +# 什么是内网穿透? 
+ + 简单说就是内网中的一台计算机具有自己的内部IP,外网的计算机具有公共的IP,而内部IP是无法直接通过外网来访问的, + 这就需要一种方式来将外网的IP转化为内部的合法IP来进行合法访问。 + + +# [FRP应用基本使用](https://github.com/xx13295/MD-Note/blob/master/frp/FRP%E5%BA%94%E7%94%A8%E5%9F%BA%E6%9C%AC%E4%BD%BF%E7%94%A8.md/) + + +# 关于拥有公网IP的服务器 + + 可以购买云服务商的vps价格相对较便宜 + VirMach、vultr、搬瓦工、阿里云国际轻量云等等 + +### 什么是VPS? + + VPS(Virtual Private Server 虚拟专用服务器)技术,将一台服务器分割成多个虚拟专享服务器的优质服务。 + 实现VPS的技术分为容器技术,和虚拟化技术。在容器或虚拟机中,每个VPS都可分配独立公网IP地址、独立操作系统、 + 实现不同VPS间磁盘空间、内存、CPU资源、进程和系统配置的隔离,为用户和应用程序模拟出“独占”使用计算资源的体验。 + VPS可以像独立服务器一样,重装操作系统,安装程序,单独重启服务器。 + VPS为使用者提供了管理配置的自由,可用于企业虚拟化,也可以用于IDC资源租用。 + diff --git a/frp/frp.sh b/frp/frp.sh new file mode 100644 index 0000000..b528bb9 --- /dev/null +++ b/frp/frp.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# +#Author: wxm +#关闭操作使用的是kill -9 +# +#脚本存放目录 +APP_HOME=/home/wxm/frp-0.27.0 +#脚本启动文件 +APP_MAIN=frps +#脚本配置文件 +APP_CFG=frps.ini + +psid=0 + +checkpid() { + shpid=`ps aux|grep $APP_MAIN|grep -v 'grep'` + if [ -n "$shpid" ]; then + psid=`echo $shpid | awk '{print $2}'` + else + psid=0 + fi +} +start() { + checkpid + if [ $psid -ne 0 ]; then + echo "================================" + echo "warn: $APP_MAIN already started! (pid=$psid)" + echo "================================" + else + echo -n "Starting $APP_MAIN ..." + $APP_HOME/$APP_MAIN -c $APP_HOME/$APP_CFG & + checkpid + if [ $psid -ne 0 ]; then + echo "(pid=$psid) [OK]" + else + echo "[Failed]" + fi + fi +} + +stop() { + checkpid + + if [ $psid -ne 0 ]; then + echo -n "Stopping $APP_MAIN ...(pid=$psid) " + kill -9 $psid + if [ $? -eq 0 ]; then + echo "[OK]" + else + echo "[Failed]" + fi + + checkpid + if [ $psid -ne 0 ]; then + stop + fi + else + echo "================================" + echo "warn: $APP_MAIN is not running" + echo "================================" + fi +} + +status() { + checkpid + + if [ $psid -ne 0 ]; then + echo "$APP_MAIN is running! 
(pid=$psid)" + else + echo "$APP_MAIN is not running" + fi +} + +case "$1" in + 'start') + start + ;; + 'stop') + stop + ;; + 'restart') + stop + start + ;; + 'status') + status + ;; + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 +esac +exit 0 diff --git "a/frp/frp\344\273\243\347\220\206\345\206\205\347\275\221\346\225\260\346\215\256\345\272\223\347\255\211\346\234\215\345\212\241.md" "b/frp/frp\344\273\243\347\220\206\345\206\205\347\275\221\346\225\260\346\215\256\345\272\223\347\255\211\346\234\215\345\212\241.md" new file mode 100644 index 0000000..bf1f0e1 --- /dev/null +++ "b/frp/frp\344\273\243\347\220\206\345\206\205\347\275\221\346\225\260\346\215\256\345\272\223\347\255\211\346\234\215\345\212\241.md" @@ -0,0 +1,29 @@ +# 数据库服务大头mysql + + 其实这些服务都走tcp 也就是和 ssh 的配置如出一辙 + +### 以mysql为例子 + + 只需将type置为tcp + 设置本地内网mysql的端口和暴露公网的端口就可以了 + + [mysql] + type = tcp + local_ip = 127.0.0.1 + local_port = 3306 + remote_port = 33306 + + +>[mysql] 这个标签是随便写的方便区分具体的业务,写啥都行如[ojbk] 但一定要有这个[xxx]这是规范。 + + 这样就能使用 你公网的ip:33306 端口去访问你的内网mysql 3306端口的数据库服务了。 + + + +#### 代理 web + + [web] + type = http + local_port = 8080 + custom_domains = frp.ojbk.plus + diff --git "a/frp/frp\347\256\241\347\220\206\350\204\232\346\234\254.md" "b/frp/frp\347\256\241\347\220\206\350\204\232\346\234\254.md" new file mode 100644 index 0000000..6cf2384 --- /dev/null +++ "b/frp/frp\347\256\241\347\220\206\350\204\232\346\234\254.md" @@ -0,0 +1,40 @@ +# frp管理脚本 + + 每次启动 ./frps -c frps.ini 关闭 ps -ef|grep frps 再去kill + 稍微有点麻烦 + +于是编写了这个 [脚本](https://github.com/xx13295/MD-Note/blob/master/frp/frp.sh/) + + +>wget https://raw.githubusercontent.com/xx13295/MD-Note/master/frp/frp.sh + +>chmod 755 frp.sh + + +可用参数 {start|stop|restart|status} + + 启动 + +>frp.sh start + + 停止 + +>frp.sh stop + + + +## 注意 + +启动前 请修改脚本 的以下参数 改为你的frp存放文件夹 + + + APP_HOME=/home/wxm/frp-0.27.0 + + + +当前脚本 是服务端脚本,客户端脚本也是一样的,只需要将脚本中 的 +APP_MAIN和APP_CFG改成客户端的即可如下: + + + APP_MAIN=frpc + APP_CFG=frpc.ini \ No newline at end of 
file diff --git "a/frp/frp\351\235\231\346\200\201\350\265\204\346\272\220.md" "b/frp/frp\351\235\231\346\200\201\350\265\204\346\272\220.md" new file mode 100644 index 0000000..c8a822c --- /dev/null +++ "b/frp/frp\351\235\231\346\200\201\350\265\204\346\272\220.md" @@ -0,0 +1,29 @@ +# 使用 rfp 做静态资源 + +优点,可以增加用户名密码控制访问。 当然[nginx](https://github.com/xx13295/MD-Note/blob/master/nginx/nginx%E9%9D%99%E6%80%81%E8%B5%84%E6%BA%90%E5%A2%9E%E5%8A%A0%E8%AE%BF%E9%97%AE%E6%8E%A7%E5%88%B6.md)也可以。 + +缺点,页面丑的可以。 + +### 在原有的客户端配置增加如下配置 + + [plugin_static_file] + type = tcp + remote_port = 23456 + plugin = static_file + plugin_local_path = /home/wxm/ojbk + plugin_strip_prefix = static + plugin_http_user = admin + plugin_http_passwd = 123456 + + +#### 启动客户端 + +>./frpc -c ./frpc.ini + +#### 浏览器访问服务端ip:23456/static/ + +>http://x.x.x.x:23456/static/ + + 输入 admin /123456 即可看到 服务器路径 /home/wxm/ojbk 下的资源 + + 下载速度主要根据你公网服务器也就是frpsever的机器的带宽决定 diff --git a/frp/img/frpjg.png b/frp/img/frpjg.png new file mode 100644 index 0000000..84edbbf Binary files /dev/null and b/frp/img/frpjg.png differ diff --git a/frp/img/zfp1.png b/frp/img/zfp1.png new file mode 100644 index 0000000..e54624d Binary files /dev/null and b/frp/img/zfp1.png differ diff --git a/frp/img/zfp2.png b/frp/img/zfp2.png new file mode 100644 index 0000000..70cace2 Binary files /dev/null and b/frp/img/zfp2.png differ diff --git "a/frp/\346\255\243\345\220\221\344\273\243\347\220\206\344\270\216\345\217\215\345\220\221\344\273\243\347\220\206.md" "b/frp/\346\255\243\345\220\221\344\273\243\347\220\206\344\270\216\345\217\215\345\220\221\344\273\243\347\220\206.md" new file mode 100644 index 0000000..aab15dc --- /dev/null +++ "b/frp/\346\255\243\345\220\221\344\273\243\347\220\206\344\270\216\345\217\215\345\220\221\344\273\243\347\220\206.md" @@ -0,0 +1,27 @@ +# 正向代理与反向代理 + + 二话不说先上图好吧,图是偷的,但很经典偷就完事了。 + +![](https://github.com/xx13295/MD-Note/blob/master/frp/img/zfp1.png) + 
+![](https://github.com/xx13295/MD-Note/blob/master/frp/img/zfp2.png) + + + +## 正向代理 + + 正向代理通过上面的图理解其实就是客户端想从服务器拿资源数据要经过proxy服务器才能拿到 + 这种情况就是 用户已经知道自己要访问谁 然后去访问他,就好比如‘FQ’,原本你想访问google但是被GWF拦截了, + 于是你使用一个梯子成功看到了外面的世界。 + +## 反向代理 + + 反向代理其实就是客户端去访问服务器时,他只知道自己访问到了服务器请求到了资源,并不知道是哪一台服务器返回给他的资源。 + 典型的例子就是负载均衡了如比较常见的nginx + nginx在负载反向代理的算法上,共有五种:轮询、ip_hash、weight、fair(第三方)、url_hash + +### 总结 + + 正向代理和反向代理的主要区别在于代理的对象不一样 + 正向代理的代理对象是客户端 + 反向代理的代理对象是服务端 \ No newline at end of file diff --git a/github/README.md b/github/README.md new file mode 100644 index 0000000..a3d11a7 --- /dev/null +++ b/github/README.md @@ -0,0 +1,13 @@ +# github打不开问题 + + 在hosts 中加入 + + 151.101.113.194 github.global.ssl.fastly.net + 192.30.253.112 github.com + + 199.232.4.133 raw.githubusercontent.com + + +~~已过时~~ + +请查阅: https://github.com/ButterAndButterfly/GithubHost \ No newline at end of file diff --git "a/github/git\344\270\215\345\260\217\345\277\203\346\217\220\344\272\244\344\272\206\351\223\255\346\204\237\350\256\260\345\275\225.md" "b/github/git\344\270\215\345\260\217\345\277\203\346\217\220\344\272\244\344\272\206\351\223\255\346\204\237\350\256\260\345\275\225.md" new file mode 100644 index 0000000..1ece600 --- /dev/null +++ "b/github/git\344\270\215\345\260\217\345\277\203\346\217\220\344\272\244\344\272\206\351\223\255\346\204\237\350\256\260\345\275\225.md" @@ -0,0 +1,14 @@ +#如何删除github commit记录 + +>git reset --soft HEAD~i + + i代表要恢复到多少次提交前的状态, + 如指定i = 2则恢复到最近两次提交前的版本 + --soft代表只删除服务器记录,不删除本地 + +>git push origin master --force + + +然后输入自己的 github用户名 + +加 token令牌就完事了 \ No newline at end of file diff --git a/hibernate validator/README.md b/hibernate validator/README.md new file mode 100644 index 0000000..1a7becc --- /dev/null +++ b/hibernate validator/README.md @@ -0,0 +1,162 @@ +# hibernate validator + + +| 常见的注解 | 作用 | +| -------- | --------| +|@AssertFalse |该字段的值只能为false| +|@AssertTrue |该字段只能为true| +|@DecimalMax |只能小于或等于该值| +|@DecimalMin |只能大于或等于该值| +|@Digits(integer=,fraction=) 
|检查是否是一种数字的整数、分数,小数位数的数字| +|@Future |检查该字段的日期是否是属于将来的日期| +|@Max |该字段的值只能小于或等于该值| +|@Min |该字段的值只能大于或等于该值| +|@NotNull |不能为null| +|@Null |检查该字段为空| +|@NotBlank |不能为空,检查时会将空格忽略| +|@NotEmpty |不能为空,这里的空是指空字符串| +|@Past |检查该字段的日期是在过去| +|@Pattern(regex=,flag=) |被注释的元素必须符合指定的正则表达式| +|@Size(min=, max=) |检查该字段的size是否在min和max之间,可以是字符串、数组、集合、Map等| +|@CreditCardNumber |对信用卡号进行一个大致的验证| +|@Email |检查是否是一个有效的email地址| +|@Length(min=,max=) |检查所属的字段的长度是否在min和max之间,只能用于字符串| +|@Range(min=,max=,message=) |被注释的元素必须在合适的范围内| +|@SafeHtml |验证用户提供的富文本值,以确保其不包含恶意代码如xss //就是垃圾 建议使用jsoup +|@URL(protocol=,host,port) |检查是否是一个有效的URL| + + + +---------------------------- + 食用方法 +---------------------------- + + +### 首先在maven中引入hibernate-validator + + + + + org.hibernate.validator + hibernate-validator + 6.0.7.Final + + + + + +### 全局异常处理返回 +### GlobalExceptionHandler.java + + import java.util.Set; + + import javax.validation.ConstraintViolation; + import javax.validation.ConstraintViolationException; + import javax.validation.ValidationException; + + import org.springframework.web.bind.annotation.ControllerAdvice; + import org.springframework.web.bind.annotation.ExceptionHandler; + import org.springframework.web.bind.annotation.ResponseBody; + import org.springframework.web.bind.annotation.ResponseStatus; + import org.springframework.http.HttpStatus; + + @ControllerAdvice + public class GlobalExceptionHandler { + + @ExceptionHandler + @ResponseBody + @ResponseStatus(HttpStatus.BAD_REQUEST) + public String handle(ValidationException exception) { + if(exception instanceof ConstraintViolationException){ + ConstraintViolationException cve = (ConstraintViolationException) exception; + Set> violations = cve.getConstraintViolations(); + violations.forEach(o -> System.out.println(o.getMessage())); + } + return "小伙子你不要乱来 我做验证了"; + } + } + + +### ValidationUtils.java + + import java.util.Set; + import javax.validation.ConstraintViolation; + import javax.validation.Validation; + import javax.validation.Validator; + 
import org.hibernate.validator.HibernateValidator; + + public class ValidationUtils { + + private static Validator validator = Validation + .byProvider(HibernateValidator.class).configure().failFast(true).buildValidatorFactory().getValidator(); + + /** + * + * 实体类参数验证 + * + */ + public static void validate(T obj) { + Set> constraintViolations = validator.validate(obj); + // 抛出检验异常 + if (constraintViolations.size() > 0) { + System.err.println(constraintViolations.iterator().next().getMessage()); + } + } + } + +### User.java + + public class User implements Serializable { + + private static final long serialVersionUID = 1L; + + @Id + private String id; // 用户编号 + + @Size(min = 6, max = 20,message = "用户名长度只能在 6-20之间") + private String userName; // 用户名 + + @Size(min = 6, max = 20) + private String passWord; // 密码 + + private String phone; // 电话号码 + private String email; // 邮箱 + + //getter + //setter + } + +### TestController.java + +### @Validated 注解可以校验RequestParam参数是否符合规则 +### http://127.0.0.1:1024/test?age=9&email=ojbk +### 就会提示不是一个合法的邮箱地址 +### http://127.0.0.1:1024/user +### 就会提示用户名长度只能在 6-20之间 + + @RestController + @Validated + public class TestController { + + @GetMapping(value = "/test") + public void demo(@Range(min = 1, max = 120, message = "年龄只能是1-120岁之间") + @RequestParam(value = "age", required = true) + int age, + @Email + @RequestParam(value = "email", required = true) + String email) { + + System.err.println(age + "-" + email); + } + + @GetMapping("/user") + public void demo2(User user) { + user = new User(); + user.setUserName("wxm"); + user.setEmail("i@ojbk.plus"); + user.setPassWord("1122345"); + user.setPhoneNumber("17666666666"); + ValidationUtils.validate(user); + } + } + diff --git a/influxdb/README.md b/influxdb/README.md new file mode 100644 index 0000000..167eb8d --- /dev/null +++ b/influxdb/README.md @@ -0,0 +1,168 @@ +# influxdb 入门 + + 时序数据库(TSDB)特点 + + 1.持续高并发写入、无更新 + + 2.数据压缩存储 + + 3.低查询延时。 + + 常见 TSDB:influxdb、opentsdb、timeScaladb、Druid 等 + 
+### influxDB 结构 + +|字段|说明| +|:-|:-| +|database| 数据库 | +|measurement|数据库中的表| +|points|表里面的一行数据| + + +#### Point 数据的构成 + + Point 由时间戳(time)、数据(field)、标签(tags)组成。 + +|Point属性 |说明| +|:-|:-| +|time |记录时间是数据库中的主索引(会自动生成)| +|fields |没有索引的属性| +|tags |有索引的属性| + + +### windows 安装 + + + 目前 已经存在2.0版本 但是windows版本暂时就 1.8.2 + +下载地址: + +>https://dl.influxdata.com/influxdb/releases/influxdb-1.8.2_windows_amd64.zip + + 打开 influxdb-1.8.2文件夹 创建 wal、data、meta 三个文件夹 + + 修改 `influxdb.conf` + + 分别找到 [meta] [data] + + 将底下的 dir 地址改为 相应的目录 + +### 启动 + +> influxd.exe + +### 客户端连接 + +> influx.exe + +毕竟现在没有 开启 鉴权 所以无需 用户名密码 + +### InfluxDB数据库常用命令 + + 1、显示所有数据库 + +>show databases + + 2、 创建数据库 + +>create database ojbk + + 3、 使用某个数据库 + +>use ojbk + + 4、 显示所有表 + +>show measurements + + 5、删除表 + +>drop measurement "tablename" + + 6、显示用户 + +>show users + + 6、创建用户 + +>create user "test" with password 'test' + + 7、创建管理员权限的用户 + +>create user "root" with password 'root' with ALL PRIVILEGES + + + 创建用户后 只需要在 influxdb.conf 中 [http] 标签下 + + 开启 鉴权 + + enabled = true + auth-enabled = true + 下次 在用 客户端连接: + +> influx.exe -username root -password root + + + 8、删除用户 + +>drop user "username" + + 9、插入数据 + + 注意逗号 + +[insert 表,tag1=value1,tag2=value2 field=value1] + +>insert cpu,host=serverA,region=us_west value=0.64 + + 10、查询数据 + +>select * from cpu + + 11、删除数据 + +删除数据 只能 根据 tag 或者time 删除 + +>delete from cpu where time=1599190173708096700 + + +### 连续查询(Continuous Queries) + +当数据超过保存策略里指定的时间之后,就会被删除。 + +如果我们不想完全删除掉,比如做一个数据统计采样: + +把原先每秒的数据,存为每小时的数据,让数据占用的空间大大减少(以降低精度为代价)。 + +这就需要InfluxDB提供的: + +**连续查询(Continuous Queries)** + + 查看当前的查询策略 + +>SHOW CONTINUOUS QUERIES + + 创建新的Continuous Queries + +>CREATE CONTINUOUS QUERY cq_30m ON testDB BEGIN SELECT mean(temperature) INTO weather30m FROM weather GROUP BY time(30m) END + +注释如下: + +|name |说明| +|:-|:-| +|cq_30m|连续查询的名字| +|testDB|具体的数据库名| +|mean(temperature)|算平均温度| +|weather|当前表名| +|weather30m| 存新数据的表名| +|30m|时间间隔为30分钟| + +当我们插入新数据之后,通过SHOW MEASUREMENTS查询发现。 + 
+可以发现数据库中多了一张名为weather30m(里面已经存着计算好的数据了)。 +这一切都是通过Continuous Queries自动完成的。 + + 删除Continuous Queries + +>DROP CONTINUOUS QUERY ON + diff --git a/influxdb/influxdb.conf b/influxdb/influxdb.conf new file mode 100644 index 0000000..0daecbe --- /dev/null +++ b/influxdb/influxdb.conf @@ -0,0 +1,26 @@ +[meta] + dir = "/var/lib/influxdb/meta" + +[data] + dir = "/var/lib/influxdb/data" + engine = "tsm1" + wal-dir = "/var/lib/influxdb/wal" + + series-id-set-cache-size = 100 + +[http] + # Determines whether HTTP endpoint is enabled. + enabled = true + + # Determines whether the Flux query endpoint is enabled. + # flux-enabled = false + + # Determines whether the Flux query logging is enabled. + # flux-log-enabled = false + + # The bind address used by the HTTP service. + bind-address = ":8086" + + # Determines whether user authentication is enabled over HTTP/HTTPS. + auth-enabled = true + diff --git "a/influxdb/\347\255\226\347\225\245.md" "b/influxdb/\347\255\226\347\225\245.md" new file mode 100644 index 0000000..ed84007 --- /dev/null +++ "b/influxdb/\347\255\226\347\225\245.md" @@ -0,0 +1,18 @@ +show retention policies +##### 当没有切换使用哪个数据库时, +##### 可 on 指定查看哪个数据库的保留策略 +show retention policies on databaseName + + +CREATE retention policy "one_month" ON "my_sensor1" duration 30d replication 1 default + +alter retention policy "one_month" on "my_sensor1" duration 2h replication 1 default + +当duration由d变成h时 需要指定SHARD DURATION 否则出现 `retention policy duration must be greater than the shard duration` + +alter retention policy "one_month" on "my_sensor1" duration 2h replication 1 shard duration 1h default + + +DROP RETENTION POLICY ON + +drop retention policy one_month on my_sensor1 diff --git "a/influxdb/\351\205\215\347\275\256\346\226\207\344\273\266\344\270\255\346\226\207\350\247\243\351\207\212.md" "b/influxdb/\351\205\215\347\275\256\346\226\207\344\273\266\344\270\255\346\226\207\350\247\243\351\207\212.md" new file mode 100644 index 0000000..38aa662 --- /dev/null +++ 
"b/influxdb/\351\205\215\347\275\256\346\226\207\344\273\266\344\270\255\346\226\207\350\247\243\351\207\212.md" @@ -0,0 +1,353 @@ +全局配置 +reporting-disabled = false +该选项用于上报influxdb的使用信息给InfluxData公司,默认值为false,将此选项设置为true将禁用报告。 + +bind-address = ":8088" +备份恢复时使用,默认值为8088。 + +Metastore配置 +[meta] +本部分控制InfluxDB metastore的参数,该metastore存储有关用户,数据库,保留策略,分片和连续查询的信息。 + +dir = "/var/lib/influxdb/meta" +meta数据存放目录。 + +retention-autocreate = true +用于控制默认存储策咯,数据库创建时,会自动生成autogen的存储策略,默认值:true。 + +logging-enabled = true +是否开启meta日志,默认值:true + +Data配置 +[data] +[data]设置控制InfluxDB的实际分片数据在何处以及如何从预写日志(WAL)中清除。 dir可能需要更改为适合您的系统的位置,但是WAL设置是高级配置。 默认值适用于大多数系统。 + +dir = "/var/lib/influxdb/data" +存储最终数据(TSM文件)的目录,此目录可能会更改。 + +index-version = "inmem" +用于新分片的分片索引的类型,默认inmem索引是在启动时重新创建的内存中索引。要启用基于时间序列(TSI)磁盘的索引,请将其值设置为tsi1。 + +wal-dir = "/var/lib/influxdb/wal" +预写日志(WAL)文件的存储目录。 + +wal-fsync-delay = "0s" +写入在fsyncing之前等待的时间。 持续时间大于0可用于批量处理多个fsync调用。 这对于较慢的磁盘或遇到WAL写入争用时很有用。 每次写入WAL时值为0s fsyncs。 对于非SSD磁盘,建议使用0-100ms范围内的值。 + +validate-keys = false +验证传入的写操作以确保密钥仅具有有效的unicode字符。 因为必须检查每个密钥, 所以此设置会产生少量的开销,默认值false。 + +query-log-enabled = true +是否开启tsm引擎查询日志,默认值: true。 查询日志对于故障排除很有用,但会记录查询中包含的所有敏感数据。 + +trace-logging-enabled = false +是否开启跟踪(trace)日志,默认值:false。 + +TSM引擎设置 +cache-max-memory-size = 1073741824 +用于限定shard最大值,大于该值时会拒绝写入,默认值:1000MB,单位:byte。 + +cache-snapshot-memory-size = "25m" +用于设置快照大小,大于该值时数据会刷新到tsm文件,默认值:25MB,单位:byte。 + +cache-snapshot-write-cold-duration = "10m" +如果分片尚未收到写入或删除操作,则tsm引擎将对缓存进行快照(snapshot)并将其写入新的TSM文件的时间间隔,默认值:10Minute。 + +compact-full-write-cold-duration = "4h" +如果TSM引擎未收到写入或删除操作,则该时间间隔会将所有TSM文件压缩到一个分片中,默认值:4h。 + +max-concurrent-compactions = 0 +可以一次运行的最大并发完全和级别压缩数。 默认值0导致50%的CPU内核在运行时用于压缩。 如果明确设置,则用于压缩的核数将限制为指定值。 此设置不适用于缓存快照,默认值:0。 + +compact-throughput = "48m" +是我们允许TSM压缩写入磁盘的速率限制(以字节/秒为单位)。 请注意,短脉冲串允许以可能更大的值发生,由Compact-Throughput-Burst设置,默认值:48m。 + +compact-throughput-burst = "48m" +是我们允许TSM压缩写入磁盘的速率限制(以每秒字节数为单位),默认值:48m。 + +tsm-use-madv-willneed = false 
+如果为true,则mmap advise值MADV_WILLNEED会就输入/输出页面调度向内核建议如何处理映射的内存区域。 由于此设置在某些内核(包括CentOS和RHEL)上存在问题,因此默认值为false。 将值更改为true可能会在某些情况下帮助磁盘速度较慢的用户。 + +In-memory(inmen)索引设置 +max-series-per-database = 1000000 +限制数据库的series数,该值为0时取消限制,默认值:1000000。 + +max-values-per-tag = 100000 +一个tag最大的value数,该值为0时取消限制,默认值:100000。 + +TSI(tsi1)索引设置 +max-index-log-file-size = "1m" +索引预写日志(WAL)文件压缩到索引文件中时的阈值(以字节为单位)。 较小的大小将导致日志文件更快地压缩,并导致较低的堆使用率,但会降低写入吞吐量。 更高的大小将更少压缩,在内存中存储更多序列,并提供更高的写入吞吐量。有效大小的后缀为k,m或g(不区分大小写,1024 = 1k)。没有大小后缀的值以字节为单位,默认值:1m。 + +series-id-set-cache-size = 100 +TSI索引中用于存储先前计算的系列结果的内部缓存的大小。 高速缓存的结果将从高速缓存中快速返回,而不是在执行具有匹配的标签键值谓词的后续查询时需要重新计算。 将此值设置为0将禁用缓存,这可能会导致查询性能问题。 仅当已知数据库的所有度量中经常使用的标记键值谓词的集合大于100时,才应增加此值。高速缓存大小的增加可能会导致堆使用率的增加。 + +查询管理设置 +[coordinator] +本部分包含查询管理的配置设置。 + +write-timeout = "10s" +写操作超时时间,默认值: 10s。 + +max-concurrent-queries = 0 +最大并发查询数,0无限制,默认值: 0。 + +query-timeout = "0s" +查询操作超时时间,0无限制,默认值:0s。 + +log-queries-after = "0s" +慢查询超时时间,0无限制,默认值:0s + +max-select-point = 0 +select语句可以处理的最大点数(points),0无限制,默认值:0。 + +max-select-series = 0 +SELECT语句可以处理的最大级数(series),0无限制,默认值:0 + +max-select-buckets = 0 +select语句可以处理的最大"GROUP BY time()"的时间周期,0无限制,默认值:0。 + +保留策咯(Retention policy)设置 +[retention] +[retention]设置用于控制淘汰旧数据的保留策略的执行。 + +enabled = true +是否启用该模块,默认值 : true,设置为false可以防止InfluxDB强制执行保留策略。 + +check-interval = "30m0s" +检查时间间隔,默认值 :“30m”。 + +分区预创建(Shard precreation)设置 +[shard-precreation] +[shard-precreation]设置控制分片的增量,以便在数据到达之前可以使用分片。 只有在创建后将在未来具有开始时间和结束时间的分片才会被创建。 永远不会预先创建全部或部分过去的碎片。 + +enabled = true +是否启用该模块,默认值 : true。 + +check-interval = "10m" +检查时间间隔,默认值 :“10m”。 + +advance-period = "30m" +预创建分区的最大提前时间,默认值 :30m。 + +监控(Monitoring)设置 +[monitor] +[monitor]部分的设置可控制InfluxDB系统的自我监视。 +默认情况下,InfluxDB将数据写入_internal数据库。 如果该数据库不存在,InfluxDB会自动创建它。 _internal数据库上的DEFAULT保留策略为7天。 如果要使用7天保留策略以外的保留策略,则必须创建它。 + +store-enabled = true +是否启用该模块,默认值 :true。 + +store-database = "_internal" +默认数据库:"_internal" + +store-interval = "10s" +统计间隔,默认值:"10s" + +HTTP端点设置 +[http] 
+[http]部分的设置控制InfluxDB如何配置HTTP端点。 这些是将数据传入和传出InfluxDB的主要机制。 编辑此部分中的设置以启用HTTPS和身份验证。 + +enabled = true +是否启用该模块,默认值 :true。 + +flux-enabled = false +是否启用流查询端点,默认值 :false。 + +flux-log-enabled = false +是否启用流查询日志。 + +bind-address = ":8086" +绑定地址,默认值:":8086"。 + +auth-enabled = false +是否开启认证,默认值:false。 + +log-enabled = true +是否开启http请求日志,默认值:true。 + +suppress-write-log = false +在启用日志时是否应禁止HTTP写入请求日志,默认值:false。 + +write-tracing = false +是否开启写操作日志,如果置成true,每一次写操作都会打日志,默认值:false。 + +pprof-enabled = true +是否开启pprof,此端点用于故障排除和监视,默认值:true。 + +pprof-auth-enabled = false +是否在/debug端点上启用身份验证。 + +debug-pprof-enabled = false +启用默认的/ pprof端点并绑定到localhost:6060。 对于调试启动性能问题很有用。默认值:false。 + +ping-auth-enabled = false +在/ ping,/ metrics和已弃用的/ status端点上启用身份验证。 如果auth-enabled设置为false,则此设置无效。 + +https-enabled = false +确定是否启用HTTPS。 要启用HTTPS,请将值设置为true。 + +https-certificate = "/etc/ssl/influxdb.pem" +启用HTTPS时要使用的SSL证书文件的路径。 + +https-private-key = "" +设置https私钥,无默认值。 + +max-row-limit = 0 +系统在非分块查询中可以返回的最大行数。 默认设置(0)允许无限制的行数。 如果查询结果超过指定的值,则InfluxDB在响应正文中包含一个“ partial”:true标记。 + +max-connection-limit = 0 +一次可以打开的最大连接数。 超出限制的新连接将被删除。 +默认值0禁用该限制。 + +shared-secret = "" +用于使用JWT令牌验证公共API请求的共享密钥。 + +realm = "InfluxDB" +发出基本身份验证质询时发送回的默认域,默认值: “InfluxDB”。 + +unix-socket-enabled = false +通过UNIX域套接字启用HTTP服务。 要通过UNIX域套接字启用HTTP服务,请将值设置为true。 + +bind-socket = "/var/run/influxdb.sock" +unix-socket路径,默认值:"/var/run/influxdb.sock"。 + +max-body-size = 25000000 +客户端请求正文的最大大小(以字节为单位), 将此值设置为0将禁用该限制。默认值:25000000。 + +access-log-path = "" +启用HTTP请求日志记录时,此选项指定应写入日志条目的路径。 + +max-concurrent-write-limit = 0 +并发处理的最大写入次数,将此设置为0将禁用该限制。默认值:0。 + +max-enqueued-write-limit = 0 +排队等待处理的最大写入次数。将此设置为0将禁用该限制。默认值:0。 + +enqueued-write-timeout = 0 +写入等待队列中写入的最长持续时间。将此设置为0或将max-concurrent-write-limit设置为0将禁用该限制。默认值:0。 + +日志(Logging)设置 +[logging] +控制记录器如何将日志发送到输出。 + +format = "auto" +确定用于日志的日志编码器。 可用选项包括auto,logfmt和json。 如果输出终端是TTY,则auto将使用更加用户友好的输出格式,但格式不易于机器读取。 当输出是非TTY时,auto将使用logfmt。默认值:“auto”。 + +level = "info" +确定将发出的日志级别。 
可用的级别包括错误,警告,信息和调试。 将发出等于或高于指定级别的日志。默认值:“info”。 + +suppress-logo = false +禁止在程序启动时打印的徽标输出。 如果STDOUT不是TTY,则始终禁止使用徽标。默认值:false。 + +订阅(Subscription)设置 +[subscriber] +[subscriber]部分控制Kapacitor如何接收数据。 + +enabled = true +是否启用该模块,默认值 :true。 + +http-timeout = "30s" +http超时时间,默认值:“30s”。 + +insecure-skip-verify = false +是否允许不安全的证书,当测试自己签发的证书时比较有用。默认值: false。 + +ca-certs = "" +设置CA证书,无默认值。 + +write-concurrency = 40 +设置并发数目,默认值:40。 + +write-buffer-size = 1000 +设置buffer大小,默认值:1000。 + +Graphite设置 +enabled = false +是否启用该模块,默认值 :false。 + +bind-address = ":2003" +默认端口。 + +database = "graphite" +数据库名称,默认值:“graphite”。 + +retention-policy = "" +存储策略,无默认值。 + +consistency-level = "one" +一致性等级,默认值:“one”。 + +tls-enabled = false +是否开启tls,默认值:false。 + +certificate = "/etc/ssl/influxdb.pem" +证书路径,默认值:"/etc/ssl/influxdb.pem"。 + +batch-size = 1000 +这些下一行控制批处理的工作方式。 您应该已启用此功能,否则您可能会丢失指标或性能不佳。 仅通过telnet协议接收的指标进行批处理。如果这么多点被缓冲,请刷新。默认值:1000。 + +batch-pending = 5 +内存中可能挂起的批次数,默认值:5。 + +batch-timeout = "1s" +即使输入未达到配置的批量大小,输入也会至少刷新一次。默认值:“1s”。 + +log-point-errors = true +出错时是否记录日志,默认值:true。 + +UDP设置 +[[udp]] +[[udp]]设置使用UDP控制InfluxDB线路协议数据的侦听器。 + +enabled = false +是否启用该模块,默认值:false。 + +bind-address = ":8089" +绑定地址,默认值:":8089"。 + +database = "udp" +数据库名称,默认值:“udp”。 + +retention-policy = "" +存储策略,无默认值。 + +batch-size = 5000 +接下来的行控制批处理的工作原理。 您应该已启用此功能,否则您可能会丢失指标或性能不佳。 如果有很多进入,批处理将缓冲内存中的点。如果这么多点被缓冲,则刷新,默认值:5000。 + +batch-pending = 10 +如果这么多点被缓冲,请刷新,默认值:10。 + +read-buffer = 0 +udp读取buffer的大小,0表示使用操作系统提供的值,如果超过操作系统的默认配置则会出错。 默认值:0。 + +batch-timeout = "1s" +即使输入未达到配置的批量大小,输入也会至少刷新一次。默认值:“1s”。 + +precision = "" +解码时间值时使用的时间精度。 默认值为纳秒,这是数据库的默认值。 + +连续查询(Continuous queries)设置 +[continuous_queries] +[continuous_queries]设置控制InfluxDB中连续查询(CQ)的运行方式。 连续查询是在最近的时间间隔内执行的自动查询批次。 InfluxDB每个GROUP BY time()间隔执行一个自动生成的查询。 + +log-enabled = true +是否开启日志,默认值:true。 + +enabled = true +是否开启CQs,默认值:true。 + +query-stats-enabled = false +控制是否将查询记录到自我监视数据存储。默认值:false。 + +run-interval = "1s" +检查连续查询是否需要运行的时间间隔,默认值:“1s”。 + +[tls] 
+InfluxDB中TLS的全局配置设置。 + +min-version = "" +将协商的tls协议的最低版本。 如果未指定,则使用Go的crypto / tls包中的默认设置,默认值:“tls1.2”。 + +max-version = "" +将协商的tls协议的最大版本。 如果未指定,则使用Go的crypto / tls包中的默认设置,默认值:“tls1.2”。 diff --git "a/java/JVM\345\206\205\345\255\230\350\256\276\347\275\256\345\244\247\345\260\217.md" "b/java/JVM\345\206\205\345\255\230\350\256\276\347\275\256\345\244\247\345\260\217.md" new file mode 100644 index 0000000..67f3720 --- /dev/null +++ "b/java/JVM\345\206\205\345\255\230\350\256\276\347\275\256\345\244\247\345\260\217.md" @@ -0,0 +1,63 @@ +## 1.参数的含义 +```aidl + +-vmargs -Xms128M -Xmx512M -XX:PermSize=64M -XX:MaxPermSize=128M +-vmargs 说明后面是VM的参数,所以后面的其实都是JVM的参数了 +-Xms128m JVM初始分配的堆内存 +-Xmx512m JVM最大允许分配的堆内存,按需分配 +-XX:PermSize=64M JVM初始分配的非堆内存 +-XX:MaxPermSize=128M JVM最大允许分配的非堆内存,按需分配 + +我们首先了解一下JVM内存管理的机制,然后再解释每个参数代表的含义。 + +``` + + + +### <1> 堆(Heap)和非堆(Non-heap)内存 + + + 按照官方的说法: + “Java 虚拟机具有一个堆,堆是运行时数据区域, + 所有类实例和数组的内存均从此处分配。堆是在 Java 虚拟机启动时创建的。” + “在JVM中堆之外的内存称为非堆内存(Non-heap memory)”。 + 可以看出JVM主要管理两种类型的内存:堆和非堆。简单来说堆就是Java代码可及的内存, + 是留给开发人员使用的;非堆就是JVM留给自己用的, + 所以方法区、JVM内部处理或优化所需的内存(如JIT编译后的代码缓存)、 + 每个类结构(如运行时常数池、字段和方法数据)以及方法和构造方法的代码都在非堆内存中。 + + +#### 堆内存分配 + + + JVM初始分配的堆内存由-Xms指定,默认是物理内存的1/64; + JVM最大分配的堆内存由-Xmx指定,默认是物理内存的1/4。 + 默认空余堆内存小于40%时,JVM就会增大堆直到-Xmx的最大限制; + 空余堆内存大于70%时,JVM会减少堆直到-Xms的最小限制。 + 因此服务器一般设置-Xms、-Xmx 相等以避免在每次GC 后调整堆的大小。 + 说明: + 如果-Xmx 不指定或者指定偏小,应用可能会导致java.lang.OutOfMemory错误, + 此错误来自JVM,不是Throwable的,无法用try...catch捕捉。 + + +#### 非堆内存分配 + + + JVM使用-XX:PermSize设置非堆内存初始值,默认是物理内存的1/64; + 由XX:MaxPermSize设置最大非堆内存的大小,默认是物理内存的1/4。 + + 网传:MaxPermSize缺省值和-server -client选项相关, + -server选项下默认MaxPermSize为64m, + -client选项下默认MaxPermSize为32m。 + + +### <2>JVM内存限制(最大值) + + + 首先JVM内存限制于实际的最大物理内存, + 假设物理内存无限大的话,JVM内存的最大值跟操作系统有很大的关系。 + 简单的说就32位处理器虽然可控内存空间有4GB, + 但是具体的操作系统会给一个限制, + 这个限制一般是2GB-3GB(一般来说Windows系统下为1.5G-2G,Linux系统下为2G-3G), + 而64bit以上的处理器就不会有限制了。 + diff --git a/java/JWT.md b/java/JWT.md new file mode 100644 index 0000000..e6c0442 --- /dev/null +++ b/java/JWT.md 
@@ -0,0 +1,96 @@ +## jwt 简易代码 + + 引入依赖 + + + com.auth0 + java-jwt + 3.7.0 + + + +#### 代码 + + +``` + + import com.auth0.jwt.algorithms.Algorithm; + import com.auth0.jwt.exceptions.JWTCreationException; + import com.auth0.jwt.exceptions.JWTVerificationException; + import com.auth0.jwt.exceptions.TokenExpiredException; + import com.auth0.jwt.interfaces.DecodedJWT; + import com.auth0.jwt.interfaces.JWTVerifier; + + import java.util.Date; + + public final class Jwt { + + private static final String[] jwtClaims = {"ojbk.plus"}; + + private static final String secret = "pI4642dg2b7VGbs6"; + + + public static void main(String[] args) throws Exception { + Jwt obj = new Jwt(); + + String token = obj.getToken( "王小明"); + + // 打印token + System.out.println("token: " + token); + // 解密token + DecodedJWT jwt = obj.decoded(token); + System.out.println("issuer: " + jwt.getIssuer()); + System.out.println("name: " + jwt.getClaim("name").asString()); + System.out.println("过期时间: " + jwt.getExpiresAt()); + + } + + + /** + * 生成加密后的token + * @param name + * @return 加密后的token + */ + public String getToken(String name) { + String token = null; + try { + Date expiresAt = new Date(System.currentTimeMillis() + 30L * 24L * 3600L * 1000L); + token = com.auth0.jwt.JWT.create() + .withIssuer(jwtClaims[0]) + .withClaim("name", name) + .withExpiresAt(expiresAt) + // 使用了HMAC256加密算法。 + // secret 是用来加密数字签名的密钥。 + .sign(Algorithm.HMAC256(secret)); + } catch (JWTCreationException e){ + //Invalid Signing configuration / Couldn't convert Claims. 
+ } catch (IllegalArgumentException e) { + e.printStackTrace(); + } + return token; + } + + /** + * 先验证token是否被伪造,然后解码token。 + * @param token 字符串token + * @return 解密后的DecodedJWT对象,可以读取token中的数据。 + */ + public DecodedJWT decoded(String token) { + DecodedJWT jwt = null; + try { + JWTVerifier verifier = com.auth0.jwt.JWT.require(Algorithm.HMAC256(secret)) + .withIssuer(jwtClaims[0]) + .build(); //Reusable verifier instance + jwt = verifier.verify(token); + } catch (JWTVerificationException e){ + //Invalid signature/claims /TokenExpired + if(e instanceof TokenExpiredException){ + System.err.println("说明该token已经过期"); + } + e.printStackTrace(); + } + return jwt; + } + } + +``` \ No newline at end of file diff --git a/java/README.md b/java/README.md new file mode 100644 index 0000000..cde7f9e --- /dev/null +++ b/java/README.md @@ -0,0 +1,18 @@ +# 你好,世界!![image](https://github.com/xx13295/wxm/blob/master/images/o.png?raw=true) + + public class HelloWorld { + public static void main(String ... args) { + System.out.println(randomString(-229985452)+' '+randomString(-147909649)); + } + + public static String randomString(int seed) { + Random rand = new Random(seed); + StringBuilder sb = new StringBuilder(); + while(true) { + int n = rand.nextInt(27); + if (n == 0) break; + sb.append((char) ('`' + n)); + } + return sb.toString(); + } + } \ No newline at end of file diff --git a/java/cors.md b/java/cors.md new file mode 100644 index 0000000..c1f241b --- /dev/null +++ b/java/cors.md @@ -0,0 +1,69 @@ +``` + + +import java.io.IOException; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.springframework.boot.web.servlet.FilterRegistrationBean; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.http.HttpHeaders; +import 
org.springframework.http.HttpMethod; +import org.springframework.http.MediaType; +import org.springframework.util.StringUtils; +import org.springframework.web.filter.OncePerRequestFilter; +import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; + +@Configuration +public class WebMvcConfiguration implements WebMvcConfigurer{ + + @Bean + public FilterRegistrationBean corsFilter (){ + FilterRegistrationBean filterRegistrationBean = new FilterRegistrationBean(); + filterRegistrationBean.setFilter(new OncePerRequestFilter() { + + @Override + protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, + FilterChain filterChain) throws ServletException, IOException { + String origin = request.getHeader(HttpHeaders.ORIGIN); + + if (StringUtils.hasText(origin)) { + + response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, origin); + + String requestHeaders = request.getHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS); + if (StringUtils.hasText(requestHeaders)) { + response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS, requestHeaders); + } + + // If the browser version is too low, "*" it may not be supported. 
+ response.addHeader(HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS, "*"); + + response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"); + + response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, "GET, POST, PUT, OPTIONS, DELETE"); + + if (HttpMethod.OPTIONS.name().equalsIgnoreCase(request.getMethod())) { + response.setStatus(HttpServletResponse.SC_NO_CONTENT); + response.setContentType(MediaType.TEXT_HTML_VALUE); + response.setCharacterEncoding("utf-8"); + response.setContentLength(0); + response.addHeader(HttpHeaders.ACCESS_CONTROL_MAX_AGE, "1800"); + return; + } + } + filterChain.doFilter(request, response); + } + + }); + filterRegistrationBean.addUrlPatterns("/*"); + filterRegistrationBean.setOrder(Integer.MIN_VALUE); + return filterRegistrationBean; + } +} +``` diff --git "a/java/idea \346\272\220\347\240\201\344\270\213\350\275\275\344\270\215\344\272\206.md" "b/java/idea \346\272\220\347\240\201\344\270\213\350\275\275\344\270\215\344\272\206.md" new file mode 100644 index 0000000..cb56eca --- /dev/null +++ "b/java/idea \346\272\220\347\240\201\344\270\213\350\275\275\344\270\215\344\272\206.md" @@ -0,0 +1,15 @@ +## 有时候idea会出现cannot download sources的情况 + +我们只需要 打开idea右下角的terminal + +输入 + +> mvn dependency:resolve -Dclassifier=sources + +然后回车 + +稍等片刻:看到Build success 说明成功 + +再次下载 + +就可以 查看源码了 diff --git "a/java/idea\346\211\223\344\270\215\345\274\200\351\227\256\351\242\230.md" "b/java/idea\346\211\223\344\270\215\345\274\200\351\227\256\351\242\230.md" new file mode 100644 index 0000000..02e31ee --- /dev/null +++ "b/java/idea\346\211\223\344\270\215\345\274\200\351\227\256\351\242\230.md" @@ -0,0 +1,35 @@ +## + +先查看 idea.log + +发现是端口被占用 + + + +>netsh int ipv4 set dynamicport tcp start=49152 num=16383 + +>netsh int ipv4 set dynamicport udp start=49152 num=16383 + + +``` + +Here's what I've found (link-heavy - sorry in advance): + +By default Windows Vista onward reserves ports 49152 to 65535 as dynamic ports (here) +It seems that updating to 
the insider preview to get access to WSL2, or maybe at some other time during the insane number of upgrades/cumulative updates/patches, the dynamic port range got reset to 1024-64511 (image below) + +Hyper-V, it appears as if it's part of the Hyper-V Replica service is supposed to reserve ports in this original dynamic range (here) +Due to the screw-up with Windows, and I'm assuming Hyper-V just randomly taking from the complete dynamic range instead of just the upper range of port +QUICK FIX: + +Run the following commands to reset the dynamic port range to what it should be and reboot - this should fix things until Windows messes up the dynamic port range again. This is only for ipv4. I haven't looked at ipv6, but I'd assume it's similar. + +netsh int ipv4 set dynamicport tcp start=49152 num=16383 +netsh int ipv4 set dynamicport udp start=49152 num=16383 + + +In the end, I would still request that JetBrains try to connect over a random open port >1024 rather than a set range just in case something similar happens again (I know that BitTorrent likes to use those ports as well by default from my research on this). 
+ +``` + +https://intellij-support.jetbrains.com/hc/en-us/community/posts/360004973960-Critical-Internal-Error-on-Startup-of-IntelliJ-IDEA-Cannot-Lock-System-Folders- diff --git "a/java/idea\347\277\273\350\257\221\346\217\222\344\273\266TKK\351\224\231\350\257\257.md" "b/java/idea\347\277\273\350\257\221\346\217\222\344\273\266TKK\351\224\231\350\257\257.md" new file mode 100644 index 0000000..1a883a4 --- /dev/null +++ "b/java/idea\347\277\273\350\257\221\346\217\222\344\273\266TKK\351\224\231\350\257\257.md" @@ -0,0 +1,13 @@ +## 加hosts + + 根本问题就是网络问题 + +``` + +203.208.40.66 translate.google.com + +203.208.40.66 translate.googleapis.com + +``` + +再不行 就只能挂代理了 diff --git a/java/jdk.md b/java/jdk.md new file mode 100644 index 0000000..6e365d4 --- /dev/null +++ b/java/jdk.md @@ -0,0 +1,32 @@ +### 下载文件 ![image](https://github.com/xx13295/wxm/blob/master/images/o.png?raw=true) + +>wget https://static.ojbk.plus/jdk-linux-x64.tar.gz + + + 究极无敌慢做好心理准备,腾讯小水管233333 + +### 上传到 /usr/local/java 下 + +### 解压 + +>tar -zxvf jdk-linux-x64.tar.gz + +### 删除原文件 + +>rm jdk-linux-x64.tar.gz + +### 修改/etc/profile文件(注意版本) + +>vi /etc/profile + + #JDK + export JAVA_HOME=/usr/local/java/jdk1.8.0_131 + export JRE_HOME=/usr/local/java/jdk1.8.0_131/jre + export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools/jar:$JRE_HOME/lib:$CLASSPATH + export PATH=$JAVA_HOME/bin/:$PATH + +### 保存 Esc :wq + +### 环境变量生效 + +>source /etc/profile diff --git "a/java/mac\345\234\260\345\235\200\345\212\240\345\207\217\346\263\225.md" "b/java/mac\345\234\260\345\235\200\345\212\240\345\207\217\346\263\225.md" new file mode 100644 index 0000000..9ee0e4f --- /dev/null +++ "b/java/mac\345\234\260\345\235\200\345\212\240\345\207\217\346\263\225.md" @@ -0,0 +1,42 @@ +# mac 加减法 + +``` + + public static void main(String[] args) throws Exception { + System.err.println(getMac("ff:ff:ff:ff:ff:01",14)); + } + + /*** + * @author wxm + * @param mac 地址 ff:ff:ff:ff:ff:01 + * @param num 整数为加 负数为减 + * @return ff:ff:ff:ff:ff:0f 
+ */ + public static String getMac(String mac, int num) { + mac = checkMac(mac.replace(":","")); + Long longMac = Long.parseLong(mac, 16); + String tempMac = Long.toHexString(longMac + num).toLowerCase(Locale.getDefault()); + if(tempMac.length() != 12) { + throw new RuntimeException("new mac is error"); + } + return getMac(tempMac); + } + + private static String getMac(String str) { + if (str.length() <=2) { + return str; + } + return str.substring(0, 2) + ":" + getMac(str.substring(2)); + } + + private static String checkMac(String mac) { + Pattern p = Pattern.compile("^[0-9a-fA-F]{12}$"); + Matcher matcher = p.matcher(mac); + if (!matcher.matches()) { + throw new RuntimeException("Mac is error"); + } + return mac; + } + + +``` diff --git "a/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/README.md" "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/README.md" new file mode 100644 index 0000000..ae48942 --- /dev/null +++ "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/README.md" @@ -0,0 +1,34 @@ +# 加解密小工具!! 
+ + 1.password-converter 源代码 + 2.pw-conver.jar 打好的jar包 + 3.converter shell脚本 + + +### 准备工作 + 上传 pw-conver.jar 到/home/ojbk/tools/ + 这里的 ojbk 为你的 用户名 没有tools 目录你可以 创建一个(使用 命令 mkdir tools ) + 上传 converter 脚本 到任意 你喜欢的地方 + 这里我选择 放在 bin目录中 /home/ojbk/bin/ + +### 赋予 脚本执行权限 + + chmod 755 converter + + +### 食用方法 + +####加密 + converter -e ojbk + + 得到结果 :aDTVdaFpzQI= +####解密 + converter -d aDTVdaFpzQI= + + 得到结果 :ojbk + + + + + + \ No newline at end of file diff --git "a/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/converter" "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/converter" new file mode 100644 index 0000000..2a7fd91 --- /dev/null +++ "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/converter" @@ -0,0 +1,8 @@ +#convter data +JAVA_HOME=/usr/java/jdk1.8.0_131 +TOOL_PATH=/home/ojbk/tools/pw-conver.jar +if [ $# -lt 2 ];then + echo "The number of parameters should be greater than 1" +else + $JAVA_HOME/bin/java -jar $TOOL_PATH $* +fi diff --git "a/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/.settings/org.eclipse.jdt.core.prefs" "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/.settings/org.eclipse.jdt.core.prefs" new file mode 100644 index 0000000..bb35fa0 --- /dev/null +++ 
"b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/.settings/org.eclipse.jdt.core.prefs" @@ -0,0 +1,11 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled +org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8 +org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve +org.eclipse.jdt.core.compiler.compliance=1.8 +org.eclipse.jdt.core.compiler.debug.lineNumber=generate +org.eclipse.jdt.core.compiler.debug.localVariable=generate +org.eclipse.jdt.core.compiler.debug.sourceFile=generate +org.eclipse.jdt.core.compiler.problem.assertIdentifier=error +org.eclipse.jdt.core.compiler.problem.enumIdentifier=error +org.eclipse.jdt.core.compiler.source=1.8 diff --git "a/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/bin/plus/ojbk/converter/PasswordConverter.class" "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/bin/plus/ojbk/converter/PasswordConverter.class" new file mode 100644 index 0000000..d56cece Binary files /dev/null and "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/bin/plus/ojbk/converter/PasswordConverter.class" differ diff --git "a/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/bin/plus/ojbk/converter/TDESUtils.class" 
"b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/bin/plus/ojbk/converter/TDESUtils.class" new file mode 100644 index 0000000..5893505 Binary files /dev/null and "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/bin/plus/ojbk/converter/TDESUtils.class" differ diff --git "a/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/src/plus/ojbk/converter/PasswordConverter.java" "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/src/plus/ojbk/converter/PasswordConverter.java" new file mode 100644 index 0000000..5b8f1a3 --- /dev/null +++ "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/src/plus/ojbk/converter/PasswordConverter.java" @@ -0,0 +1,54 @@ +package plus.ojbk.converter; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.security.spec.InvalidKeySpecException; + +import javax.crypto.BadPaddingException; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; + +public class PasswordConverter { + + private static final String[] options = {"-e", "-d"}; + public static final String KEY = "e793104vg726apo6c293428bce4efc65d"; + public static void 
main(String[] args) throws InvalidKeyException, NoSuchAlgorithmException, InvalidKeySpecException, NoSuchPaddingException, IllegalBlockSizeException, BadPaddingException, UnsupportedEncodingException, IOException { + if (args.length < 2) { + System.err.println("The number of parameters should be greater than 1"); + System.exit(1); + } + String option = args[0]; + boolean valid = false; + for (String op : options) { + if (op.equals(option)) { + valid = true; + break; + } + } + if (!valid) { + System.err.println("Invalid option : " + option); + System.exit(1); + } + String data = args[1]; + switch(option) { + case "-e" : encrypt(data);break; + case "-d" : decrypt(data);break; + } + + } + + private static void encrypt(String data) throws InvalidKeyException, NoSuchAlgorithmException, InvalidKeySpecException, NoSuchPaddingException, IllegalBlockSizeException, BadPaddingException, UnsupportedEncodingException, IOException { + TDESUtils threeDES = new TDESUtils(); + String value_encrypt = threeDES.encryptThreeDESECB(URLEncoder.encode(data, "UTF-8"), KEY); + System.out.println(value_encrypt); + } + + private static void decrypt(String data) throws InvalidKeyException, NoSuchAlgorithmException, InvalidKeySpecException, NoSuchPaddingException, IllegalBlockSizeException, BadPaddingException, IOException { + TDESUtils threeDES = new TDESUtils(); + String value_decrypt = threeDES.decryptThreeDESECB(data, KEY); + System.out.println(value_decrypt); + } +} diff --git "a/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/src/plus/ojbk/converter/TDESUtils.java" "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/src/plus/ojbk/converter/TDESUtils.java" new file mode 100644 index 
0000000..60a11d4 --- /dev/null +++ "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/password-converter/src/plus/ojbk/converter/TDESUtils.java" @@ -0,0 +1,127 @@ +package plus.ojbk.converter; + +import java.io.IOException; +import java.security.InvalidAlgorithmParameterException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.security.spec.InvalidKeySpecException; +import java.util.Base64; + +import javax.crypto.BadPaddingException; +import javax.crypto.Cipher; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.DESKeySpec; +import javax.crypto.spec.DESedeKeySpec; +import javax.crypto.spec.IvParameterSpec; + +/** + * @author 3DES 加解密 + */ + +public class TDESUtils { + public static final String IV = "f1w-*%1s"; + private static final String DES_CBC_ALGORITHM = "DES/CBC/PKCS5Padding"; + private static final String DES_ECB_ALGORITHM = "DESede/ECB/PKCS5Padding"; + + /** + * DESCBC加密 + * + * @param value 数据源 + * @param key 密钥,长度必须是8的倍数 + * @return 返回加密后的数据 + * @throws Exception + */ + public String encryptDESCBC(final String value, final String key) + throws IOException, InvalidKeyException, NoSuchAlgorithmException, InvalidKeySpecException, + NoSuchPaddingException, InvalidAlgorithmParameterException, IllegalBlockSizeException, BadPaddingException { + // 生成key,同时制定是des还是DESede,两者的key长度要求不同 + final DESKeySpec desKeySpec = new DESKeySpec(key.getBytes("UTF-8")); + final SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("DES"); + final SecretKey secretKey = keyFactory.generateSecret(desKeySpec); + // 加密向量 + final IvParameterSpec iv = new IvParameterSpec(IV.getBytes("UTF-8")); + final Cipher cipher = Cipher.getInstance(DES_CBC_ALGORITHM); + 
cipher.init(Cipher.ENCRYPT_MODE, secretKey, iv); + final byte[] valueByte = cipher.doFinal(value.getBytes("UTF-8")); + // 通过base64,将加密数组转换成字符串 + final Base64.Encoder encoder = Base64.getEncoder(); + return encoder.encodeToString(valueByte); + } + + /** + * DESCBC解密 + * + * @param value 数据源 + * @param key 密钥,长度必须是8的倍数 + * @return 返回解密后的原始数据 + * @throws Exception + */ + public String decryptDESCBC(final String value, final String key) + throws IOException, InvalidKeyException, NoSuchAlgorithmException, InvalidKeySpecException, + NoSuchPaddingException, InvalidAlgorithmParameterException, IllegalBlockSizeException, BadPaddingException { + // 通过base64,将字符串转成byte数组 + final Base64.Decoder decoder = Base64.getDecoder(); + final byte[] textByte = value.getBytes("UTF-8"); + final byte[] byteValue = decoder.decode(textByte); + final DESKeySpec desKeySpec = new DESKeySpec(key.getBytes("UTF-8")); + final SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("DES"); + final SecretKey secretKey = keyFactory.generateSecret(desKeySpec); + final IvParameterSpec iv = new IvParameterSpec(IV.getBytes("UTF-8")); + final Cipher cipher = Cipher.getInstance(DES_CBC_ALGORITHM); + cipher.init(Cipher.DECRYPT_MODE, secretKey, iv); + final byte[] retByte = cipher.doFinal(byteValue); + return new String(retByte); + + } + + /** + * 3DESECB加密 + * + * @param value 数据源 + * @param key 必须是长度大于等于 3*8 = 24 位 + * @return 密文 + * @throws Exception + */ + public String encryptThreeDESECB(final String value, final String key) + throws IOException, InvalidKeyException, NoSuchAlgorithmException, InvalidKeySpecException, + NoSuchPaddingException, IllegalBlockSizeException, BadPaddingException { + final DESedeKeySpec dks = new DESedeKeySpec(key.getBytes("UTF-8")); + final SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("DESede"); + final SecretKey securekey = keyFactory.generateSecret(dks); + final Cipher cipher = Cipher.getInstance(DES_ECB_ALGORITHM); + cipher.init(Cipher.ENCRYPT_MODE, 
securekey); + final byte[] valueByte = cipher.doFinal(value.getBytes()); + final Base64.Encoder encoder = Base64.getEncoder(); + return encoder.encodeToString(valueByte); + } + + /** + * 3DESECB解密 + * + * @param value 数据源 + * @param key 必须是长度大于等于 3*8 = 24 位 + * @return 明文 + * @throws Exception + */ + public String decryptThreeDESECB(final String value, final String key) + throws IOException, InvalidKeyException, NoSuchAlgorithmException, InvalidKeySpecException, + NoSuchPaddingException, IllegalBlockSizeException, BadPaddingException { + // 通过base64,将字符串转成byte数组 + final Base64.Decoder decoder = Base64.getDecoder(); + final byte[] textByte = value.getBytes("UTF-8"); + final byte[] byteValue = decoder.decode(textByte); + // 解密的key + final DESedeKeySpec dks = new DESedeKeySpec(key.getBytes("UTF-8")); + final SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("DESede"); + final SecretKey securekey = keyFactory.generateSecret(dks); + // Chipher对象解密 + final Cipher cipher = Cipher.getInstance(DES_ECB_ALGORITHM); + cipher.init(Cipher.DECRYPT_MODE, securekey); + final byte[] retByte = cipher.doFinal(byteValue); + return new String(retByte); + } + +} \ No newline at end of file diff --git "a/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/pw-conver.jar" "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/pw-conver.jar" new file mode 100644 index 0000000..603b92e Binary files /dev/null and "b/java/\344\275\277\347\224\2503DES\345\212\240\350\247\243\345\257\206\345\210\266\344\275\234\345\257\206\347\240\201\350\275\254\346\215\242\345\260\217\345\267\245\345\205\267/pw-conver.jar" differ diff --git "a/java/\345\256\236\344\275\223\347\261\273\345\276\252\347\216\257\350\265\213\345\200\274.md" 
"b/java/\345\256\236\344\275\223\347\261\273\345\276\252\347\216\257\350\265\213\345\200\274.md" new file mode 100644 index 0000000..29ca0de --- /dev/null +++ "b/java/\345\256\236\344\275\223\347\261\273\345\276\252\347\216\257\350\265\213\345\200\274.md" @@ -0,0 +1,33 @@ +# 一个朋友问我如何 循环赋值实例类 + + 于是 + +``` + + public static void main(String[] args) throws Exception{ + Ojbk ojbk = new Ojbk(); + + String[] data = {"wxm","666666"}; + + Class cls = ojbk.getClass(); + Field[] fields = cls.getDeclaredFields(); + + for(int i=0; i integer = new AtomicReference<>(0); + AtomicInteger x = new AtomicInteger(); + for (File file1 : file) { + if (file1.isFile()) { + Object[] xori = getXor(file1); + if (xori != null && xori[1] != null){ + x.set((int)xori[1]); + } + break; + } + } + Arrays.stream(file).parallel().forEach(file1 -> { + if (file1.isDirectory()) { + String[] newTargetPath = file1.getPath().split("/|\\\\"); + File targetFile = new File(targetPath+File.separator+newTargetPath[newTargetPath.length - 1]); + if (!targetFile.exists()) { + targetFile.mkdirs(); + } + convert(file1.getPath(),targetPath+File.separator+newTargetPath[newTargetPath.length - 1]); + return; + } + Object[] xor = getXor(file1); + if (x.get() == 0 && xor[1] != null && (int) xor[1] != 0) { + x.set((int) xor[1]); + } + xor[1] = xor[1] == null ? x.get() : xor[1]; + try (InputStream reader = new FileInputStream(file1); + OutputStream writer = + new FileOutputStream(targetPath + File.separator + file1.getName().split("\\.")[0] + (xor[0] != null ? + "." 
+ xor[0] : ""))) { + byte[] bytes = new byte[1024 * 10]; + int b; + while ((b = reader.read(bytes)) != -1) {//这里的in.read(bytes);就是把输入流中的东西,写入到内存中(bytes)。 + for (int i = 0; i < bytes.length; i++) { + bytes[i] = (byte) (int) (bytes[i] ^ (int) xor[1]); + if (i == (b - 1)) { + break; + } + } + writer.write(bytes, 0, b); + writer.flush(); + } + integer.set(integer.get() + 1); + System.out.println(file1.getName() + "(大小:" + ((double) file1.length() / 1000) + "kb,异或值:" + xor[1] + ")," + + "进度:" + integer.get() + + "/" + size); + } catch (Exception e) { + e.printStackTrace(); + } + }); + System.out.println("解析完毕!"); + } + + /** + * 判断图片异或值 + * + * @param file + * @return + */ + private static Object[] getXor(File file) { + Object[] xor = null; + if (file != null) { + byte[] bytes = new byte[4]; + try (InputStream reader = new FileInputStream(file)) { + reader.read(bytes, 0, bytes.length); + } catch (Exception e) { + e.printStackTrace(); + } + xor = getXor(bytes); + } + return xor; + } + + /** + * @param bytes + * @return + */ + private static Object[] getXor(byte[] bytes) { + Object[] xorType = new Object[2]; + int[] xors = new int[3]; + for (Map.Entry type : FILE_TYPE_MAP.entrySet()) { + String[] hex = { + String.valueOf(type.getKey().charAt(0)) + type.getKey().charAt(1), + String.valueOf(type.getKey().charAt(2)) + type.getKey().charAt(3), + String.valueOf(type.getKey().charAt(4)) + type.getKey().charAt(5) + }; + xors[0] = bytes[0] & 0xFF ^ Integer.parseInt(hex[0], 16); + xors[1] = bytes[1] & 0xFF ^ Integer.parseInt(hex[1], 16); + xors[2] = bytes[2] & 0xFF ^ Integer.parseInt(hex[2], 16); + if (xors[0] == xors[1] && xors[1] == xors[2]) { + xorType[0] = type.getValue(); + xorType[1] = xors[0]; + break; + } + } + return xorType; + } + + private final static Map FILE_TYPE_MAP = new HashMap(); + + static { + getAllFileType(); + } + + private static void getAllFileType() { + FILE_TYPE_MAP.put("ffd8ffe000104a464946", "jpg"); //JPEG (jpg) + 
FILE_TYPE_MAP.put("89504e470d0a1a0a0000", "png"); //PNG (png) + FILE_TYPE_MAP.put("47494638396126026f01", "gif"); //GIF (gif) + FILE_TYPE_MAP.put("49492a00227105008037", "tif"); //TIFF (tif) + FILE_TYPE_MAP.put("424d228c010000000000", "bmp"); //16色位图(bmp) + FILE_TYPE_MAP.put("424d8240090000000000", "bmp"); //24位位图(bmp) + FILE_TYPE_MAP.put("424d8e1b030000000000", "bmp"); //256色位图(bmp) + FILE_TYPE_MAP.put("41433130313500000000", "dwg"); //CAD (dwg) + FILE_TYPE_MAP.put("3c21444f435459504520", "html"); //HTML (html) + FILE_TYPE_MAP.put("3c21646f637479706520", "htm"); //HTM (htm) + FILE_TYPE_MAP.put("48544d4c207b0d0a0942", "css"); //css + FILE_TYPE_MAP.put("696b2e71623d696b2e71", "js"); //js + FILE_TYPE_MAP.put("7b5c727466315c616e73", "rtf"); //Rich Text Format (rtf) + FILE_TYPE_MAP.put("38425053000100000000", "psd"); //Photoshop (psd) + FILE_TYPE_MAP.put("46726f6d3a203d3f6762", "eml"); //Email [Outlook Express 6] (eml) + FILE_TYPE_MAP.put("d0cf11e0a1b11ae10000", "doc"); //MS Excel 注意:word、msi 和 excel的文件头一样 + FILE_TYPE_MAP.put("d0cf11e0a1b11ae10000", "vsd"); //Visio 绘图 + FILE_TYPE_MAP.put("5374616E64617264204A", "mdb"); //MS Access (mdb) + FILE_TYPE_MAP.put("252150532D41646F6265", "ps"); + FILE_TYPE_MAP.put("255044462d312e360d25", "pdf"); //Adobe Acrobat (pdf) + FILE_TYPE_MAP.put("2e524d46000000120001", "rmvb"); //rmvb/rm相同 + FILE_TYPE_MAP.put("464c5601050000000900", "flv"); //flv与f4v相同 + FILE_TYPE_MAP.put("00000020667479706973", "mp4"); + FILE_TYPE_MAP.put("49443303000000000f76", "mp3"); + FILE_TYPE_MAP.put("000001ba210001000180", "mpg"); // + FILE_TYPE_MAP.put("3026b2758e66cf11a6d9", "wmv"); //wmv与asf相同 + FILE_TYPE_MAP.put("524946464694c9015741", "wav"); //Wave (wav) + FILE_TYPE_MAP.put("52494646d07d60074156", "avi"); + FILE_TYPE_MAP.put("4d546864000000060001", "mid"); //MIDI (mid) + FILE_TYPE_MAP.put("504b0304140000000800", "zip"); + FILE_TYPE_MAP.put("526172211a0700cf9073", "rar"); + FILE_TYPE_MAP.put("235468697320636f6e66", "ini"); + 
FILE_TYPE_MAP.put("504b03040a0000000000", "jar"); + FILE_TYPE_MAP.put("4d5a9000030000000400", "exe");//可执行文件 + FILE_TYPE_MAP.put("3c25402070616765206c", "jsp");//jsp文件 + FILE_TYPE_MAP.put("4d616e69666573742d56", "mf");//MF文件 + FILE_TYPE_MAP.put("3c3f786d6c2076657273", "xml");//xml文件 + FILE_TYPE_MAP.put("efbbbf2f2a0d0a53514c", "sql");//xml文件 + FILE_TYPE_MAP.put("7061636b616765207765", "java");//java文件 + FILE_TYPE_MAP.put("406563686f206f66660d", "bat");//bat文件 + FILE_TYPE_MAP.put("1f8b0800000000000000", "gz");//gz文件 + FILE_TYPE_MAP.put("6c6f67346a2e726f6f74", "properties");//bat文件 + FILE_TYPE_MAP.put("cafebabe0000002e0041", "class");//bat文件 + FILE_TYPE_MAP.put("49545346030000006000", "chm");//bat文件 + FILE_TYPE_MAP.put("04000000010000001300", "mxp");//bat文件 + FILE_TYPE_MAP.put("504b0304140006000800", "docx");//docx文件 + FILE_TYPE_MAP.put("d0cf11e0a1b11ae10000", "wps");//WPS文字wps、表格et、演示dps都是一样的 + FILE_TYPE_MAP.put("6431303a637265617465", "torrent"); + FILE_TYPE_MAP.put("494d4b48010100000200", "264"); + + + FILE_TYPE_MAP.put("6D6F6F76", "mov"); //Quicktime (mov) + FILE_TYPE_MAP.put("FF575043", "wpd"); //WordPerfect (wpd) + FILE_TYPE_MAP.put("CFAD12FEC5FD746F", "dbx"); //Outlook Express (dbx) + FILE_TYPE_MAP.put("2142444E", "pst"); //Outlook (pst) + FILE_TYPE_MAP.put("AC9EBD8F", "qdf"); //Quicken (qdf) + FILE_TYPE_MAP.put("E3828596", "pwl"); //Windows Password (pwl) + FILE_TYPE_MAP.put("2E7261FD", "ram"); //Real Audio (ram) + } +} + + + +``` \ No newline at end of file diff --git "a/java/\346\211\223jar\345\272\224\347\224\250.md" "b/java/\346\211\223jar\345\272\224\347\224\250.md" new file mode 100644 index 0000000..75e0003 --- /dev/null +++ "b/java/\346\211\223jar\345\272\224\347\224\250.md" @@ -0,0 +1,68 @@ +# 添加如下maven 插件 + + + 注意修改 mainClass标签内的启动类 + 以下是两种 打包方式 + +```aidl + + + + + maven-assembly-plugin + 3.0.0 + + + + plus.ojbk.test.TestApplication + + + + jar-with-dependencies + + + + + make-assembly + package + + single + + + + + + org.apache.maven.plugins + 
maven-jar-plugin + + + + plus.ojbk.test.TestApplication + + + + + + com.jolira + onejar-maven-plugin + 1.4.4 + + + + one-jar + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + + + + + +``` \ No newline at end of file diff --git "a/java/\346\211\223\345\214\205exe\346\226\207\344\273\266/README.md" "b/java/\346\211\223\345\214\205exe\346\226\207\344\273\266/README.md" new file mode 100644 index 0000000..804fff6 --- /dev/null +++ "b/java/\346\211\223\345\214\205exe\346\226\207\344\273\266/README.md" @@ -0,0 +1,25 @@ +# 打包exe + +已经发布到 https://springboot.io/t/topic/4897/2 + +https://github.com/jrsoftware/issrc + +https://github.com/wixtoolset/wix3/releases/tag/wix3112rtm + +https://raw.githubusercontent.com/jrsoftware/issrc/main/Files/Languages/Unofficial/ChineseSimplified.isl + + +``` + +#define MyVersionInfoVersion "1.0.0.0" +#define MyVersionInfoDescription "java打包exe的demo" +#define MyVersionInfoCopyright "Copyright ©2022-2099 wxm" + + + + +VersionInfoVersion={#MyVersionInfoVersion} +VersionInfoDescription={#MyVersionInfoDescription} +VersionInfoCopyright={#MyVersionInfoCopyright} + +``` diff --git "a/java/\346\226\207\344\273\266\347\261\273\345\236\213\351\252\214\350\257\201\343\200\220\351\255\224\346\234\257\345\200\274\343\200\221/CheckFileType.java" "b/java/\346\226\207\344\273\266\347\261\273\345\236\213\351\252\214\350\257\201\343\200\220\351\255\224\346\234\257\345\200\274\343\200\221/CheckFileType.java" new file mode 100644 index 0000000..2626f6a --- /dev/null +++ "b/java/\346\226\207\344\273\266\347\261\273\345\236\213\351\252\214\350\257\201\343\200\220\351\255\224\346\234\257\345\200\274\343\200\221/CheckFileType.java" @@ -0,0 +1,96 @@ +package plus.ojbk.util; + +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.web.multipart.MultipartFile; + +import com.alibaba.fastjson.JSONObject; 
+ +public class CheckFileType { + + private static final Logger logger = LoggerFactory.getLogger(CheckFileType.class); + + public final static Map FILE_TYPE = new HashMap(); + + /** + * 魔术值开头相同的 要同时判断 后缀 与魔术对应 方可确定该文件 + */ + static { + FILE_TYPE.put("mp3", "494433"); + FILE_TYPE.put("jpg", "FFD8FF"); + FILE_TYPE.put("png", "89504E47"); + FILE_TYPE.put("gif", "47494638"); + FILE_TYPE.put("xlsx", "504B0304"); // 与zip 相同开头 + FILE_TYPE.put("xls", "D0CF11E0"); + FILE_TYPE.put("zip", "504B0304"); + FILE_TYPE.put("doc", "D0CF11E0"); //与xls 相同开头 + FILE_TYPE.put("pdf", "255044462D312E"); //pdf文件 + FILE_TYPE.put("docx", "504B0304");//与zip 相同开头 + FILE_TYPE.put("rar", "52617221"); + //如果以上 文件类型不能满足你 ,问题不大 。 上传你需要 的文件 64行log打印的 魔术值 抬头自己加入map中即可 + } + + /** + * 调用示例 + * + * JSONObject obj = CheckFileType.getFileByFile(multipartFile); + * if(!obj.getBoolean("state")) { + * System.err.println("文件不合法"); + * } + * + * + */ + public final static JSONObject getFileByFile(MultipartFile file) throws IOException { + byte[] b = new byte[50]; + String fileName =file.getOriginalFilename(); + String fileType= fileName.substring(fileName.lastIndexOf(".") + 1); + InputStream is =file.getInputStream(); + is.read(b); + JSONObject obj = getFileType(b,fileType); + is.close(); + return obj; + } + + /** + * 验证文件 + */ + public final static JSONObject getFileType(byte[] b,String fileType) { + String fileTypeHex = String.valueOf(getFileHexString(b)); + logger.info("文件的魔术值 - {}",fileTypeHex); + logger.info("未验证的文件后缀 - {}",fileType); + JSONObject obj = new JSONObject(); + if(fileTypeHex.toUpperCase().startsWith(FILE_TYPE.get(fileType))) { + obj.put("state", true); + obj.put("type", fileType); + obj.put("msg", "文件合法!"); + }else { + obj.put("state", false); + obj.put("msg", "未知文件!"); + } + return obj; + } + + /** + * 获取文件的字节码 + */ + public final static String getFileHexString(byte[] b) { + StringBuilder stringBuilder = new StringBuilder(); + if (b == null || b.length <= 0) { + return ""; + } + for (int i = 
0; i < b.length; i++) { + int v = b[i] & 0xFF; + String hv = Integer.toHexString(v); + if (hv.length() < 2) { + stringBuilder.append(0); + } + stringBuilder.append(hv); + } + return stringBuilder.toString(); + } +} diff --git "a/java/\346\226\207\344\273\266\347\261\273\345\236\213\351\252\214\350\257\201\343\200\220\351\255\224\346\234\257\345\200\274\343\200\221/README.md" "b/java/\346\226\207\344\273\266\347\261\273\345\236\213\351\252\214\350\257\201\343\200\220\351\255\224\346\234\257\345\200\274\343\200\221/README.md" new file mode 100644 index 0000000..ea2f7af --- /dev/null +++ "b/java/\346\226\207\344\273\266\347\261\273\345\236\213\351\252\214\350\257\201\343\200\220\351\255\224\346\234\257\345\200\274\343\200\221/README.md" @@ -0,0 +1,14 @@ +# 魔术值 + +### 什么是魔术值? + + 魔术值就是文件的最开头的几个用于唯一区别其它文件类型的字节, + 使用魔术值我们就可以很方便的区别不同的文件。 + +## 注意 + + 不同文件类型的魔术值开头可能相同,所以区分的办法就是验证文件后缀和魔术值, + 这样就保证文件是我们想要的文件类型啦。 + + 文件校验很重要! 单纯的判断文件后缀 容易被坏人利用! + diff --git "a/java/\347\272\277\347\250\213\346\261\240.md" "b/java/\347\272\277\347\250\213\346\261\240.md" new file mode 100644 index 0000000..199ff6c --- /dev/null +++ "b/java/\347\272\277\347\250\213\346\261\240.md" @@ -0,0 +1,79 @@ +# ThreadPoolExecutor的重要参数 + + 1、corePoolSize:核心线程数 + * 核心线程会一直存活,及时没有任务需要执行 + * 当线程数小于核心线程数时,即使有线程空闲,线程池也会优先创建新线程处理 + * 设置allowCoreThreadTimeout=true(默认false)时,核心线程会超时关闭 + + 2、queueCapacity:任务队列容量(阻塞队列) + * 当核心线程数达到最大时,新任务会放在队列中排队等待执行 + + 3、maxPoolSize:最大线程数 + * 当线程数>=corePoolSize,且任务队列已满时。线程池会创建新线程来处理任务 + * 当线程数=maxPoolSize,且任务队列已满时,线程池会拒绝处理任务而抛出异常 + + 4、 keepAliveTime:线程空闲时间 + * 当线程空闲时间达到keepAliveTime时,线程会退出,直到线程数量=corePoolSize + * 如果allowCoreThreadTimeout=true,则会直到线程数量=0 + + 5、allowCoreThreadTimeout:允许核心线程超时 + 6、rejectedExecutionHandler:任务拒绝处理器 + * 两种情况会拒绝处理任务: + - 当线程数已经达到maxPoolSize,切队列已满,会拒绝新任务 + - 当线程池被调用shutdown()后,会等待线程池里的任务执行完毕,再shutdown。如果在调用shutdown()和线程池真正shutdown之间提交任务,会拒绝新任务 + * 线程池会调用rejectedExecutionHandler来处理这个任务。如果没有设置默认是AbortPolicy,会抛出异常 + * ThreadPoolExecutor类有几个内部实现类来处理这类情况: + - 
AbortPolicy 丢弃任务,抛运行时异常 + - CallerRunsPolicy 执行任务 + - DiscardPolicy 忽视,什么都不会发生 + - DiscardOldestPolicy 从队列中踢出最先进入队列(最后一个执行)的任务 + * 实现RejectedExecutionHandler接口,可自定义处理器 + +# 二、ThreadPoolExecutor执行顺序 + + 线程池按以下行为执行任务 + 1. 当线程数小于核心线程数时,创建线程。 + 2. 当线程数大于等于核心线程数,且任务队列未满时,将任务放入任务队列。 + 3. 当线程数大于等于核心线程数,且任务队列已满 + - 若线程数小于最大线程数,创建线程 + - 若线程数等于最大线程数,抛出异常,拒绝任务 + + +# 三、如何设置参数 + + 1、默认值 + * corePoolSize=1 + * queueCapacity=Integer.MAX_VALUE + * maxPoolSize=Integer.MAX_VALUE + * keepAliveTime=60s + * allowCoreThreadTimeout=false + * rejectedExecutionHandler=AbortPolicy() + + 2、如何来设置 + * 需要根据几个值来决定 + - tasks :每秒的任务数,假设为500~1000 + - taskcost:每个任务花费时间,假设为0.1s + - responsetime:系统允许容忍的最大响应时间,假设为1s + * 做几个计算 + - corePoolSize = 每秒需要多少个线程处理? + * threadcount = tasks/(1/taskcost) =tasks*taskcout = (500~1000)*0.1 = 50~100 个线程。corePoolSize设置应该大于50 + * 根据8020原则,如果80%的每秒任务数小于800,那么corePoolSize设置为80即可 + - queueCapacity = (coreSizePool/taskcost)*responsetime + * 计算可得 queueCapacity = 80/0.1*1 = 80。意思是队列里的线程可以等待1s,超过了的需要新开线程来执行 + * 切记不能设置为Integer.MAX_VALUE,这样队列会很大,线程数只会保持在corePoolSize大小,当任务陡增时,不能新开线程来执行,响应时间会随之陡增。 + - maxPoolSize = (max(tasks)- queueCapacity)/(1/taskcost) + * 计算可得 maxPoolSize = (1000-80)/10 = 92 + * (最大任务数-队列容量)/每个线程每秒处理能力 = 最大线程数 + - rejectedExecutionHandler:根据具体情况来决定,任务不重要可丢弃,任务重要则要利用一些缓冲机制来处理 + - keepAliveTime和allowCoreThreadTimeout采用默认通常能满足 + + 3、 以上都是理想值,实际情况下要根据机器性能来决定。如果在未达到最大线程数的情况机器cpu load已经满了,则需要通过升级硬件(呵呵)和优化代码,降低taskcost来处理。 + + +``` + + + + + +``` diff --git "a/java/\347\274\226\347\240\201\351\227\256\351\242\230\347\251\272\346\240\274.md" "b/java/\347\274\226\347\240\201\351\227\256\351\242\230\347\251\272\346\240\274.md" new file mode 100644 index 0000000..32fded6 --- /dev/null +++ "b/java/\347\274\226\347\240\201\351\227\256\351\242\230\347\251\272\346\240\274.md" @@ -0,0 +1,5 @@ +# 空白 + +\\u00AD\\u00A0\\u3000\\u200C\\u200D\\uFEFF\\u200B\\u2800\\u2029\\u2028\\u000D\\u000C\\u000B\\u000A\\u0009\\u0008 + +# .. 
\ No newline at end of file diff --git "a/java/\350\277\207\346\273\244/List\345\210\206\347\273\204\350\277\207\346\273\244.md" "b/java/\350\277\207\346\273\244/List\345\210\206\347\273\204\350\277\207\346\273\244.md" new file mode 100644 index 0000000..c010fa6 --- /dev/null +++ "b/java/\350\277\207\346\273\244/List\345\210\206\347\273\204\350\277\207\346\273\244.md" @@ -0,0 +1,9 @@ +``` + + +list.stream().collect(Collectors.groupingBy(Entity::getName)).values().stream() + .map(Entitys ->Entitys.stream() + .max(Comparator.comparing(Entity::getId)).get()) + .collect(Collectors.toList()) + +``` diff --git "a/java/\350\277\207\346\273\244/Node.java" "b/java/\350\277\207\346\273\244/Node.java" new file mode 100644 index 0000000..08a5d95 --- /dev/null +++ "b/java/\350\277\207\346\273\244/Node.java" @@ -0,0 +1,39 @@ +package plus.ojbk.test; + +import java.util.ArrayList; +import java.util.List; + +public class Node { + private char root; + private List childList; + private boolean isLeaf; + + public Node(char root) { + this.root = root; + childList = new ArrayList(); + isLeaf = false; + } + + public Node subNode(char c) { + if (childList != null) { + for (Node eachChild : childList) { + if (eachChild.root == c) { + return eachChild; + } + } + } + return null; + } + + public boolean isLeaf() { + return isLeaf; + } + + public void setLeaf(boolean leaf) { + isLeaf = leaf; + } + + public void addChild(Node node) { + this.childList.add(node); + } +} diff --git "a/java/\350\277\207\346\273\244/WordsTree.java" "b/java/\350\277\207\346\273\244/WordsTree.java" new file mode 100644 index 0000000..bf783cf --- /dev/null +++ "b/java/\350\277\207\346\273\244/WordsTree.java" @@ -0,0 +1,96 @@ +package plus.ojbk.test; + +public class WordsTree { + private static Node head; + + public WordsTree() { + head = new Node(' ');// 头结点 + insert("\u00AD"); + insert("\u00A0"); + insert("\u3000"); + insert("\u200C"); + insert("\u200D"); + insert("\uFEFF"); + insert("\u200B"); + insert("\u2800"); + 
insert("\u2029"); + insert("\u2028"); + //insert("\\u000D"); + insert("\u000C"); + insert("\u000B"); + //insert("\\u000A"); + insert("\u0009"); + insert("\u0008"); + } + + public void insert(String word) { + if (search(word).equals(word)) { + return;// 检测树中是否存在此词 + } + Node node = head; + for (int i = 0; i < word.length(); i++) { + Node child = node.subNode(word.charAt(i)); + if (child != null) { + node = child; + } else { + node.addChild(new Node(word.charAt(i))); + node = node.subNode(word.charAt(i)); + } + } + node.setLeaf(true); + } + + public static String search(String word) { + Node node = head; + String str = ""; + for (int i = 0; i < word.length(); i++) { + if (node.isLeaf()) { + return str; + } + // 在这添加防止内容中的敏感词汇被空格(可以加其他字符)隔开,无法识别 + if (word.charAt(i) == ' ') { + str += word.charAt(i); + continue; + } + if (node.subNode(word.charAt(i)) == null) { + return ""; + } + node = node.subNode(word.charAt(i)); + str += word.charAt(i); + } + if (node.isLeaf() == true) { + return str; + } else { + return ""; + } + } + + /** + * 过滤内容中的相应字符为空字符串 + * @param content + * @return + */ + public static String filterContent(String content) { + return filterContent(content, ""); + } + + /** + * 过滤内容中的相应字符为预设好的值 + * @param content 带过滤内容 + * @param value 预设值 + * @return + */ + public static String filterContent(String content, String value) { + String str = ""; + String newContent = content; + for (int i = 0; i < content.length(); i++) { + str = search(content.substring(i)); + if (!str.equals("")) { + newContent = newContent.replaceAll(str, value); //替换为预设值 + } + i += str.length(); + } + + return newContent; + } +} diff --git a/jpa/README.md b/jpa/README.md new file mode 100644 index 0000000..12021fd --- /dev/null +++ b/jpa/README.md @@ -0,0 +1,51 @@ +# QueryDsl + + # Maven + + com.querydsl + querydsl-jpa + 4.2.1 + + + + com.querydsl + querydsl-apt + 4.2.1 + provided + + + com.mysema.maven + apt-maven-plugin + 1.1.3 + + + + process + + + target/generated-sources/java + 
com.querydsl.apt.jpa.JPAAnnotationProcessor + + + + + + + +#### 查询类的生成 + + # 需要通过 apt-maven-plugin 插件和querydsl-apt配合来生成检索类 + + * 不同Entity注解可以使用不同的生成策略(com.querydsl.apt.jpa.JPAAnnotationProcessor) + com.querydsl.apt.jpa.JPAAnnotationProcessor //jpa的注解 + com.querydsl.apt.hibernate.HibernateAnnotationProcessor //Hibernate的注解 + + * 使用maven命令: mvn compile -DskipTests + + * 会在目录: target/generated-sources/java 生成与entity同包的查询类(类名都添加了Q字符) 【由上方 maven plugin 中定义的目录】 + + +### 相关代码 + + https://github.com/xx13295/springboot-querydsl + \ No newline at end of file diff --git "a/jpa/\347\272\277\347\250\213\344\270\255save\346\226\271\346\263\225\346\227\240\346\225\210.md" "b/jpa/\347\272\277\347\250\213\344\270\255save\346\226\271\346\263\225\346\227\240\346\225\210.md" new file mode 100644 index 0000000..d7aa611 --- /dev/null +++ "b/jpa/\347\272\277\347\250\213\344\270\255save\346\226\271\346\263\225\346\227\240\346\225\210.md" @@ -0,0 +1,34 @@ +# repository.save 无效 + + 在spring的 @Scheduled定时任务,或者在线程池中使用 jpa 的 repository.save 无效 一般情况就是没有配置对应的事务管理器。 + + +### 1. 配置 + + @Configuration + @EnableTransactionManagement + public class JpaConfiguration { + + /* + @Bean + public JPAQueryFactory jpaQuery(EntityManager entityManager) { + return new JPAQueryFactory(entityManager); + } + */ + + @Autowired + private EntityManagerFactory entityManagerFactory; + + @Bean(name = "transactionManagerJpa") + public PlatformTransactionManager transactionManagerJpa() { + return new JpaTransactionManager(entityManagerFactory); + } + } + +### 2. 扫描 repository 增加 transactionManagerRef + + @EnableJpaRepositories(basePackages = "com.xxx.server.repository.*", transactionManagerRef = "transactionManagerJpa") + +### 3. 
在对应的save方法上 增加 Transactional 注解 + + @Transactional(value = "transactionManagerJpa") \ No newline at end of file diff --git a/jrebel/README.md b/jrebel/README.md new file mode 100644 index 0000000..de53c56 --- /dev/null +++ b/jrebel/README.md @@ -0,0 +1,161 @@ +# 远程热部署断点调试 + + Idea 远程断点调试 + + 如果你还在用eclipse 那么接下的教程对你毫无价值。 + +### 准备工作: + + + 首先开启自动编译 点击 左上角 file -> setting + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/1.png?raw=true) + + 接着搜索 compiler + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/2.png?raw=true) + + Ctrl + shift +a 调出 搜索框 输入 registry 。 鼠标点击 标黄的选项如下图 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/3.png?raw=true) + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/4.png?raw=true) + + +### 安装插件 jrebel + + + 在线安装 + 同其它插件安装一样,请按照以下步骤先行按照插件 + File -> Settings... -> Plugins -> 查找 Jrebel + + 以下是网图: + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/5.png?raw=true) + + 按照成功后会提示重启,这时候重启IDEA即可。 + 离线安装 + + 先到官网下载: + +>https://plugins.jetbrains.com/plugin/4441-jrebel-for-intellij + + 点击get 点download 完事 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/6.png?raw=true) + + 点击 小齿轮 选择下面的Install Plugin from disk... 
+ + 选择 离线下载好的 jrebel的插件 安装即可。 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/7.png?raw=true) + + + + +### 安装 jrebel许可证 + 此步骤在linux服务器上执行 + +>wget https://raw.githubusercontent.com/xx13295/MD-Note/master/jrebel/jrebel-license.jar + +>java -jar jrebel-license.jar + + 这里可以使用nohup 启动,即便关闭窗口也不会导致进程立马关闭。 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/8.png?raw=true) + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/9.png?raw=true) + + + 我已经配置过了所以我的右上方是change 正常的情况应该是 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/10.png?raw=true) + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/11.png?raw=true) + + 以上是 关于jrebel的安装与激活使用,接下来开始重头戏。 + + + +## 切换到linux 服务器 + +### 下载 jrebel 远程调试服务安装包 + +>wget http://dl.zeroturnaround.com/jrebel-stable-nosetup.zip + + 解压 + +>unzip jrebel-stable-nosetup.zip + + 进入 jrebel目录设置密码为123456789 +>cd jrebel + +>java -jar jrebel.jar -set-remote-password 123456789 + +### 激活服务端(jrebel/bin) + +>cd bin + + 启动脚本 :./activate.sh (空格)认证服务地址 (空格) 邮箱 。 + 例如下方的格式 + +>./activate.sh http://xxx.xxx.xxx.xxx:8081/bc1fdd38-9be0-4251-a619-e14a4a6c21b91 i@ojbk.plus + + + 配置本地jrebel + + File -> setting ->JRebel ->Startup + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/12.png?raw=true) + + + 回到主界面 把下面两个打勾 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/13.png?raw=true) + + + 打开idea 看到界面右上方 如下图显示 点击Edit Configurations... 
+ +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/14.png?raw=true) + + + 点击 + 号 选择Remote + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/15.png?raw=true) + + 可以看到 右边界面 如下图所示, 我们只需要修改Host ,Port随你心情改或者不改都行。 然后直接点击Ok 完事。 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/16.png?raw=true) + + + +## 接着将这个项目 打包到服务器上 + +#### 远程热部署 启动 + + 注意修改 路径 以及后面的jar包 + +>java -agentpath:/home/wxm/jrebel/lib/libjrebel64.so -Drebel.remoting_plugin=true -jar webapp.jar + +#### 远程热部署+远程调试 启动 + +>java -agentpath:/home/wxm/jrebel/lib/libjrebel64.so - agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 -Drebel.remoting_plugin=true -jar webapp.jar + + 指定端口 失效的话就是项目内部的默认配置的端口 或者使用 --server.port=8083 + +>java -agentpath:/home/wxm/jrebel/lib/libjrebel64.so -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 -Drebel.remoting_plugin=true -jar webapp.jar -port 8083 + + + 回到 idea添加远程调试地址 以及刚才启动activate.sh设置的密码 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/17.png?raw=true) + + 启动远程断点服务 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/18.png?raw=true) + + 这样就可以在本地代码中打断点 做线上调试了。 修改代码等编译后点击 + +![image](https://github.com/xx13295/MD-Note/blob/master/jrebel/jrebel-pic/19.png?raw=true) + + 就可以热部署到线上 无需重新打包。 \ No newline at end of file diff --git a/jrebel/jrebel-license.jar b/jrebel/jrebel-license.jar new file mode 100644 index 0000000..5ccebad Binary files /dev/null and b/jrebel/jrebel-license.jar differ diff --git a/jrebel/jrebel-pic/1.png b/jrebel/jrebel-pic/1.png new file mode 100644 index 0000000..6704417 Binary files /dev/null and b/jrebel/jrebel-pic/1.png differ diff --git a/jrebel/jrebel-pic/10.png b/jrebel/jrebel-pic/10.png new file mode 100644 index 0000000..557b873 Binary files /dev/null and b/jrebel/jrebel-pic/10.png differ diff --git a/jrebel/jrebel-pic/11.png b/jrebel/jrebel-pic/11.png new file mode 100644 index 
0000000..bcab6f5 Binary files /dev/null and b/jrebel/jrebel-pic/11.png differ diff --git a/jrebel/jrebel-pic/12.png b/jrebel/jrebel-pic/12.png new file mode 100644 index 0000000..3b19414 Binary files /dev/null and b/jrebel/jrebel-pic/12.png differ diff --git a/jrebel/jrebel-pic/13.png b/jrebel/jrebel-pic/13.png new file mode 100644 index 0000000..a66947a Binary files /dev/null and b/jrebel/jrebel-pic/13.png differ diff --git a/jrebel/jrebel-pic/14.png b/jrebel/jrebel-pic/14.png new file mode 100644 index 0000000..d319b27 Binary files /dev/null and b/jrebel/jrebel-pic/14.png differ diff --git a/jrebel/jrebel-pic/15.png b/jrebel/jrebel-pic/15.png new file mode 100644 index 0000000..d5dcf23 Binary files /dev/null and b/jrebel/jrebel-pic/15.png differ diff --git a/jrebel/jrebel-pic/16.png b/jrebel/jrebel-pic/16.png new file mode 100644 index 0000000..7ee93c8 Binary files /dev/null and b/jrebel/jrebel-pic/16.png differ diff --git a/jrebel/jrebel-pic/17.png b/jrebel/jrebel-pic/17.png new file mode 100644 index 0000000..e38e9f0 Binary files /dev/null and b/jrebel/jrebel-pic/17.png differ diff --git a/jrebel/jrebel-pic/18.png b/jrebel/jrebel-pic/18.png new file mode 100644 index 0000000..62ecd3b Binary files /dev/null and b/jrebel/jrebel-pic/18.png differ diff --git a/jrebel/jrebel-pic/19.png b/jrebel/jrebel-pic/19.png new file mode 100644 index 0000000..b126dd4 Binary files /dev/null and b/jrebel/jrebel-pic/19.png differ diff --git a/jrebel/jrebel-pic/2.png b/jrebel/jrebel-pic/2.png new file mode 100644 index 0000000..d0ee2b5 Binary files /dev/null and b/jrebel/jrebel-pic/2.png differ diff --git a/jrebel/jrebel-pic/3.png b/jrebel/jrebel-pic/3.png new file mode 100644 index 0000000..8734dab Binary files /dev/null and b/jrebel/jrebel-pic/3.png differ diff --git a/jrebel/jrebel-pic/4.png b/jrebel/jrebel-pic/4.png new file mode 100644 index 0000000..ea7df12 Binary files /dev/null and b/jrebel/jrebel-pic/4.png differ diff --git a/jrebel/jrebel-pic/5.png 
b/jrebel/jrebel-pic/5.png new file mode 100644 index 0000000..ad88860 Binary files /dev/null and b/jrebel/jrebel-pic/5.png differ diff --git a/jrebel/jrebel-pic/6.png b/jrebel/jrebel-pic/6.png new file mode 100644 index 0000000..f68c442 Binary files /dev/null and b/jrebel/jrebel-pic/6.png differ diff --git a/jrebel/jrebel-pic/7.png b/jrebel/jrebel-pic/7.png new file mode 100644 index 0000000..34e34e4 Binary files /dev/null and b/jrebel/jrebel-pic/7.png differ diff --git a/jrebel/jrebel-pic/8.png b/jrebel/jrebel-pic/8.png new file mode 100644 index 0000000..5de7379 Binary files /dev/null and b/jrebel/jrebel-pic/8.png differ diff --git a/jrebel/jrebel-pic/9.png b/jrebel/jrebel-pic/9.png new file mode 100644 index 0000000..b7c3b5f Binary files /dev/null and b/jrebel/jrebel-pic/9.png differ diff --git a/kafka/README.md b/kafka/README.md new file mode 100644 index 0000000..34de5c3 --- /dev/null +++ b/kafka/README.md @@ -0,0 +1,99 @@ +# kafka + + kafka解压 + +>tar -xzf kafka_2.12-2.1.0.tgz + + . + +>cd kafka_2.12-2.1.0 + + . + +>vi config/server.properties + + 增加配置 + + delete.topic.enable=true + + 将listeners=PLAINTEXT://:9092 改为 listeners=PLAINTEXT://ip:9092 + + + + 运行kafka需要使用Zookeeper,所以需要先启动Zookeeper + +>cd zookeeper-3.4.13/ + + . 
+ +>bin/zkServer.sh start + + +### 启动kafka + +>bin/kafka-server-start.sh config/server.properties & + + +### 创建一个主题(topic) + +>bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test + + 创建好之后,可以通过运行以下命令,查看已创建的topic信息: + +>bin/kafka-topics.sh --list --zookeeper localhost:2181 + +### 发送消息 & 接收消息 + +>bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test + +>bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning + + +## 设置多个broker集群 + +1.首先为每个broker创建一个配置文件: + +> cp config/server.properties config/server-1.properties +> cp config/server.properties config/server-2.properties + +>vi config/server-1.properties + + broker.id=1 + delete.topic.enable=true + listeners=PLAINTEXT://ip:9093 + log.dir=/tmp/kafka-logs-1 + +>vi config/server-2.properties + + broker.id=2 + delete.topic.enable=true + listeners=PLAINTEXT://ip:9094 + log.dir=/tmp/kafka-logs-2 + + broker.id是集群中每个节点的唯一且永久的名称不可重复。 + +2.启动服务 + +> bin/kafka-server-start.sh config/server-1.properties & + +> bin/kafka-server-start.sh config/server-2.properties & + +3.创建一个新topic[ojbk],把备份设置为:3 + +>bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 3 --partitions 1 --topic ojbk + +3.监控 + +>bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic ojbk + + 输出: + + Topic:ojbk PartitionCount:1 ReplicationFactor:3 Configs: + Topic: ojbk Partition: 0 Leader: 0 Replicas: 0,1,2 Isr: 0,1,2 + + + "leader":该节点负责该分区的所有的读和写,每个节点的leader都是随机选择的。 + "replicas":备份的节点列表,无论该节点是否是leader或者目前是否还活着,只是显示。 + "isr":“同步备份”的节点列表,也就是活着的节点并且正在同步leader。 + + \ No newline at end of file diff --git a/kafka/spring-kafka.md b/kafka/spring-kafka.md new file mode 100644 index 0000000..8110b89 --- /dev/null +++ b/kafka/spring-kafka.md @@ -0,0 +1,85 @@ +### spring-kafka + +1.添加maven依赖 + + + org.springframework.kafka + spring-kafka + + + + org.springframework.kafka + spring-kafka-test + test + +2.config + + #============== 
kafka =================== + # 指定kafka server的地址,集群配多个,中间,逗号隔开 + spring.kafka.bootstrap-servers=192.168.5.167:9092,192.168.5.167:9093,192.168.5.167:9094 + + #=============== provider ======================= + # 写入失败时,重试次数。当leader节点失效,一个repli节点会替代成为leader节点,此时可能出现写入失败, + # 当retris为0时,produce不会重复。retirs重发,此时repli节点完全成为leader节点,不会产生消息丢失。 + spring.kafka.producer.retries=0 + # 每次批量发送消息的数量,produce积累到一定数据,一次发送 + spring.kafka.producer.batch-size=65536 + # produce积累数据一次发送,缓存大小达到buffer.memory就发送数据 + spring.kafka.producer.buffer-memory=524288 + + #procedure要求leader在考虑完成请求之前收到的确认数,用于控制发送记录在服务端的持久化,其值可以为如下: + #acks = 0 如果设置为零,则生产者将不会等待来自服务器的任何确认,该记录将立即添加到套接字缓冲区并视为已发送。在这种情况下,无法保证服务器已收到记录,并且重试配置将不会生效(因为客户端通常不会知道任何故障),为每条记录返回的偏移量始终设置为-1。 + #acks = 1 这意味着leader会将记录写入其本地日志,但无需等待所有副本服务器的完全确认即可做出回应,在这种情况下,如果leader在确认记录后立即失败,但在将数据复制到所有的副本服务器之前,则记录将会丢失。 + #acks = all 这意味着leader将等待完整的同步副本集以确认记录,这保证了只要至少一个同步副本服务器仍然存活,记录就不会丢失,这是最强有力的保证,这相当于acks = -1的设置。 + #可以设置的值为:all, -1, 0, 1 + spring.kafka.producer.acks=1 + + # 指定消息key和消息体的编解码方式 + spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer + spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer + + + #=============== consumer ======================= + # 指定默认消费者group id --> 由于在kafka中,同一组中的consumer不会读取到同一个消息,依靠groud.id设置组名 + spring.kafka.consumer.group-id=ojbkGroup + # smallest和largest才有效,如果smallest重新0开始读取,如果是largest从logfile的offset读取。一般情况下我们都是设置smallest + spring.kafka.consumer.auto-offset-reset=earliest + # enable.auto.commit:true --> 设置自动提交offset + spring.kafka.consumer.enable-auto-commit=true + #如果'enable.auto.commit'为true,则消费者偏移自动提交给Kafka的频率(以毫秒为单位),默认值为5000。 + spring.kafka.consumer.auto-commit-interval=100 + + # 指定消息key和消息体的编解码方式 + spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer + spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer + + +3.消费者 + + @Component + public class 
ConsumerListener { + + @KafkaListener(topics = "ojbk") + public void onMessage(String message){ + System.out.println(message); + } + + } + +4.生产者 + + @RestController + public class KafkaController { + + @Autowired + private KafkaTemplate msgTemplate; + + @GetMapping("/send") + public String send(@RequestParam String message){ + msgTemplate.send("ojbk", message); + return message; + } + } + + + 我透这也太简单了 \ No newline at end of file diff --git a/linux/InstallNET-20200714.sh b/linux/InstallNET-20200714.sh new file mode 100644 index 0000000..a75caf8 --- /dev/null +++ b/linux/InstallNET-20200714.sh @@ -0,0 +1,858 @@ +#!/bin/bash + +## License: GPL +## It can reinstall Debian, Ubuntu, CentOS system with network. +## Default root password: MoeClub.org +## Blog: https://moeclub.org +## Written By MoeClub.org + +export tmpVER='' +export tmpDIST='' +export tmpURL='' +export tmpWORD='' +export tmpMirror='' +export tmpSSL='' +export tmpINS='' +export ipAddr='' +export ipMask='' +export ipGate='' +export Relese='' +export ddMode='0' +export setNet='0' +export setRDP='0' +export setIPv6='0' +export isMirror='0' +export FindDists='0' +export loaderMode='0' +export IncFirmware='0' +export SpikCheckDIST='0' +export setInterfaceName='0' +export UNKNOWHW='0' +export UNVER='6.4' + +while [[ $# -ge 1 ]]; do + case $1 in + -v|--ver) + shift + tmpVER="$1" + shift + ;; + -d|--debian) + shift + Relese='Debian' + tmpDIST="$1" + shift + ;; + -u|--ubuntu) + shift + Relese='Ubuntu' + tmpDIST="$1" + shift + ;; + -c|--centos) + shift + Relese='CentOS' + tmpDIST="$1" + shift + ;; + -dd|--image) + shift + ddMode='1' + tmpURL="$1" + shift + ;; + -p|--password) + shift + tmpWORD="$1" + shift + ;; + -i|--interface) + shift + interface="$1" + shift + ;; + --ip-addr) + shift + ipAddr="$1" + shift + ;; + --ip-mask) + shift + ipMask="$1" + shift + ;; + --ip-gate) + shift + ipGate="$1" + shift + ;; + --dev-net) + shift + setInterfaceName='1' + ;; + --loader) + shift + loaderMode='1' + ;; + --prefer) + 
shift + tmpPrefer="$1" + shift + ;; + -a|--auto) + shift + tmpINS='auto' + ;; + -m|--manual) + shift + tmpINS='manual' + ;; + -apt|-yum|--mirror) + shift + isMirror='1' + tmpMirror="$1" + shift + ;; + -rdp) + shift + setRDP='1' + WinRemote="$1" + shift + ;; + -ssl) + shift + tmpSSL="$1" + shift + ;; + -firmware) + shift + IncFirmware="1" + ;; + --ipv6) + shift + setIPv6='1' + ;; + *) + if [[ "$1" != 'error' ]]; then echo -ne "\nInvaild option: '$1'\n\n"; fi + echo -ne " Usage:\n\tbash $(basename $0)\t-d/--debian [\033[33m\033[04mdists-name\033[0m]\n\t\t\t\t-u/--ubuntu [\033[04mdists-name\033[0m]\n\t\t\t\t-c/--centos [\033[33m\033[04mdists-verison\033[0m]\n\t\t\t\t-v/--ver [32/\033[33m\033[04mi386\033[0m|64/amd64]\n\t\t\t\t--ip-addr/--ip-gate/--ip-mask\n\t\t\t\t-apt/-yum/--mirror\n\t\t\t\t-dd/--image\n\t\t\t\t-a/--auto\n\t\t\t\t-m/--manual\n" + exit 1; + ;; + esac + done + +[[ "$EUID" -ne '0' ]] && echo "Error:This script must be run as root!" && exit 1; + +function CheckDependence(){ +FullDependence='0'; +for BIN_DEP in `echo "$1" |sed 's/,/\n/g'` + do + if [[ -n "$BIN_DEP" ]]; then + Founded='0'; + for BIN_PATH in `echo "$PATH" |sed 's/:/\n/g'` + do + ls $BIN_PATH/$BIN_DEP >/dev/null 2>&1; + if [ $? == '0' ]; then + Founded='1'; + break; + fi + done + if [ "$Founded" == '1' ]; then + echo -en "[\033[32mok\033[0m]\t"; + else + FullDependence='1'; + echo -en "[\033[31mNot Install\033[0m]"; + fi + echo -en "\t$BIN_DEP\n"; + fi + done +if [ "$FullDependence" == '1' ]; then + echo -ne "\n\033[31mError! 
\033[0mPlease use '\033[33mapt-get\033[0m' or '\033[33myum\033[0m' install it.\n\n\n" + exit 1; +fi +} + +function SelectMirror(){ + [ $# -ge 3 ] || exit 1 + Relese="$1" + DIST=$(echo "$2" |sed 's/\ //g' |sed -r 's/(.*)/\L\1/') + VER=$(echo "$3" |sed 's/\ //g' |sed -r 's/(.*)/\L\1/') + New=$(echo "$4" |sed 's/\ //g') + [ -n "$Relese" ] || exit 1 + [ -n "$DIST" ] || exit 1 + [ -n "$VER" ] || exit 1 + relese=$(echo $Relese |sed -r 's/(.*)/\L\1/') + if [ "$Relese" == "Debian" ] || [ "$Relese" == "Ubuntu" ]; then + inUpdate=''; [ "$Relese" == "Ubuntu" ] && inUpdate='-updates' + MirrorTEMP="SUB_MIRROR/dists/${DIST}${inUpdate}/main/installer-${VER}/current/images/netboot/${relese}-installer/${VER}/initrd.gz" + elif [ "$Relese" == "CentOS" ]; then + MirrorTEMP="SUB_MIRROR/${DIST}/os/${VER}/isolinux/initrd.img" + fi + [ -n "$MirrorTEMP" ] || exit 1 + MirrorStatus=0 + declare -A MirrorBackup + MirrorBackup=(["Debian0"]="" ["Debian1"]="http://deb.debian.org/debian" ["Debian2"]="http://archive.debian.org/debian" ["Ubuntu0"]="" ["Ubuntu1"]="http://archive.ubuntu.com/ubuntu" ["CentOS0"]="" ["CentOS1"]="http://mirror.centos.org/centos" ["CentOS2"]="http://vault.centos.org") + echo "$New" |grep -q '^http://\|^https://\|^ftp://' && MirrorBackup[${Relese}0]="$New" + for mirror in $(echo "${!MirrorBackup[@]}" |sed 's/\ /\n/g' |sort -n |grep "^$Relese") + do + CurMirror="${MirrorBackup[$mirror]}" + [ -n "$CurMirror" ] || continue + MirrorURL=`echo "$MirrorTEMP" |sed "s#SUB_MIRROR#${CurMirror}#g"` + wget --no-check-certificate --spider --timeout=3 -o /dev/null "$MirrorURL" + [ $? 
-eq 0 ] && MirrorStatus=1 && break + done + [ $MirrorStatus -eq 1 ] && echo "$CurMirror" || exit 1 +} + +[ -n "$Relese" ] || Relese='Debian' +linux_relese=$(echo "$Relese" |sed 's/\ //g' |sed -r 's/(.*)/\L\1/') +clear && echo -e "\n\033[36m# Check Dependence\033[0m\n" + +if [[ "$ddMode" == '1' ]]; then + CheckDependence iconv; + linux_relese='debian'; + tmpDIST='jessie'; + tmpVER='amd64'; + tmpINS='auto'; +fi + +if [[ "$Relese" == 'Debian' ]] || [[ "$Relese" == 'Ubuntu' ]]; then + CheckDependence wget,awk,grep,sed,cut,cat,cpio,gzip,find,dirname,basename; +elif [[ "$Relese" == 'CentOS' ]]; then + CheckDependence wget,awk,grep,sed,cut,cat,cpio,gzip,find,dirname,basename,file,xz; +fi +[ -n "$tmpWORD" ] && CheckDependence openssl + +if [[ "$loaderMode" == "0" ]]; then + [[ -f '/boot/grub/grub.cfg' ]] && GRUBVER='0' && GRUBDIR='/boot/grub' && GRUBFILE='grub.cfg'; + [[ -z "$GRUBDIR" ]] && [[ -f '/boot/grub2/grub.cfg' ]] && GRUBVER='0' && GRUBDIR='/boot/grub2' && GRUBFILE='grub.cfg'; + [[ -z "$GRUBDIR" ]] && [[ -f '/boot/grub/grub.conf' ]] && GRUBVER='1' && GRUBDIR='/boot/grub' && GRUBFILE='grub.conf'; + [ -z "$GRUBDIR" -o -z "$GRUBFILE" ] && echo -ne "Error! 
\nNot Found grub.\n" && exit 1; +else + tmpINS='auto' +fi + +if [[ -n "$tmpVER" ]]; then + tmpVER="$(echo "$tmpVER" |sed -r 's/(.*)/\L\1/')"; + if [[ "$tmpVER" == '32' ]] || [[ "$tmpVER" == 'i386' ]] || [[ "$tmpVER" == 'x86' ]]; then + VER='i386'; + fi + if [[ "$tmpVER" == '64' ]] || [[ "$tmpVER" == 'amd64' ]] || [[ "$tmpVER" == 'x86_64' ]] || [[ "$tmpVER" == 'x64' ]]; then + if [[ "$Relese" == 'Debian' ]] || [[ "$Relese" == 'Ubuntu' ]]; then + VER='amd64'; + elif [[ "$Relese" == 'CentOS' ]]; then + VER='x86_64'; + fi + fi +fi +[ -z "$VER" ] && VER='amd64' + +if [[ -z "$tmpDIST" ]]; then + [ "$Relese" == 'Debian' ] && tmpDIST='jessie' && DIST='jessie'; + [ "$Relese" == 'Ubuntu' ] && tmpDIST='bionic' && DIST='bionic'; + [ "$Relese" == 'CentOS' ] && tmpDIST='6.10' && DIST='6.10'; +fi + +if [[ -z "$DIST" ]]; then + if [[ "$Relese" == 'Debian' ]]; then + SpikCheckDIST='0' + DIST="$(echo "$tmpDIST" |sed -r 's/(.*)/\L\1/')"; + echo "$DIST" |grep -q '[0-9]'; + [[ $? -eq '0' ]] && { + isDigital="$(echo "$DIST" |grep -o '[\.0-9]\{1,\}' |sed -n '1h;1!H;$g;s/\n//g;$p' |cut -d'.' -f1)"; + [[ -n $isDigital ]] && { + [[ "$isDigital" == '7' ]] && DIST='wheezy'; + [[ "$isDigital" == '8' ]] && DIST='jessie'; + [[ "$isDigital" == '9' ]] && DIST='stretch'; + [[ "$isDigital" == '10' ]] && DIST='buster'; + } + } + LinuxMirror=$(SelectMirror "$Relese" "$DIST" "$VER" "$tmpMirror") + fi + if [[ "$Relese" == 'Ubuntu' ]]; then + SpikCheckDIST='0' + DIST="$(echo "$tmpDIST" |sed -r 's/(.*)/\L\1/')"; + echo "$DIST" |grep -q '[0-9]'; + [[ $? 
-eq '0' ]] && { + isDigital="$(echo "$DIST" |grep -o '[\.0-9]\{1,\}' |sed -n '1h;1!H;$g;s/\n//g;$p')"; + [[ -n $isDigital ]] && { + [[ "$isDigital" == '12.04' ]] && DIST='precise'; + [[ "$isDigital" == '14.04' ]] && DIST='trusty'; + [[ "$isDigital" == '16.04' ]] && DIST='xenial'; + [[ "$isDigital" == '18.04' ]] && DIST='bionic'; + } + } + LinuxMirror=$(SelectMirror "$Relese" "$DIST" "$VER" "$tmpMirror") + fi + if [[ "$Relese" == 'CentOS' ]]; then + SpikCheckDIST='1' + DISTCheck="$(echo "$tmpDIST" |grep -o '[\.0-9]\{1,\}')"; + LinuxMirror=$(SelectMirror "$Relese" "$DISTCheck" "$VER" "$tmpMirror") + ListDIST="$(wget --no-check-certificate -qO- "$LinuxMirror/dir_sizes" |cut -f2 |grep '^[0-9]')" + DIST="$(echo "$ListDIST" |grep "^$DISTCheck" |head -n1)" + [[ -z "$DIST" ]] && { + echo -ne '\nThe dists version not found in this mirror, Please check it! \n\n' + bash $0 error; + exit 1; + } + wget --no-check-certificate -qO- "$LinuxMirror/$DIST/os/$VER/.treeinfo" |grep -q 'general'; + [[ $? != '0' ]] && { + echo -ne "\nThe version not found in this mirror, Please change mirror try again! \n\n"; + exit 1; + } + fi +fi + +if [[ -z "$LinuxMirror" ]]; then + echo -ne "\033[31mError! \033[0mInvaild mirror! 
\n" + [ "$Relese" == 'Debian' ] && echo -en "\033[33mexample:\033[0m http://deb.debian.org/debian\n\n"; + [ "$Relese" == 'Ubuntu' ] && echo -en "\033[33mexample:\033[0m http://archive.ubuntu.com/ubuntu\n\n"; + [ "$Relese" == 'CentOS' ] && echo -en "\033[33mexample:\033[0m http://mirror.centos.org/centos\n\n"; + bash $0 error; + exit 1; +fi + +if [[ "$SpikCheckDIST" == '0' ]]; then + DistsList="$(wget --no-check-certificate -qO- "$LinuxMirror/dists/" |grep -o 'href=.*/"' |cut -d'"' -f2 |sed '/-\|old\|Debian\|experimental\|stable\|test\|sid\|devel/d' |grep '^[^/]' |sed -n '1h;1!H;$g;s/\n//g;s/\//\;/g;$p')"; + for CheckDEB in `echo "$DistsList" |sed 's/;/\n/g'` + do + [[ "$CheckDEB" == "$DIST" ]] && FindDists='1' && break; + done + [[ "$FindDists" == '0' ]] && { + echo -ne '\nThe dists version not found, Please check it! \n\n' + bash $0 error; + exit 1; + } +fi + +[[ "$ddMode" == '1' ]] && { + export SSL_SUPPORT='https://github.com/xx13295/MD-Note/raw/master/linux/wget_udeb_amd64.tar.gz'; + if [[ -n "$tmpURL" ]]; then + DDURL="$tmpURL" + echo "$DDURL" |grep -q '^http://\|^ftp://\|^https://'; + [[ $? -ne '0' ]] && echo 'Please input vaild URL,Only support http://, ftp:// and https:// !' && exit 1; + [[ -n "$tmpSSL" ]] && SSL_SUPPORT="$tmpSSL"; + else + echo 'Please input vaild image URL! 
'; + exit 1; + fi +} + +[[ -n "$tmpINS" ]] && { + [[ "$tmpINS" == 'auto' ]] && inVNC='n'; + [[ "$tmpINS" == 'manual' ]] && inVNC='y'; +} + +[ -n "$ipAddr" ] && [ -n "$ipMask" ] && [ -n "$ipGate" ] && setNet='1'; +[[ -n "$tmpWORD" ]] && myPASSWORD="$(openssl passwd -1 "$tmpWORD")"; +[[ -z "$myPASSWORD" ]] && myPASSWORD='$1$4BJZaD0A$y1QykUnJ6mXprENfwpseH0'; + +if [[ -n "$interface" ]]; then + IFETH="$interface" +else + if [[ "$linux_relese" == 'centos' ]]; then + IFETH="link" + else + IFETH="auto" + fi +fi + +clear && echo -e "\n\033[36m# Install\033[0m\n" + +ASKVNC(){ + inVNC='y'; + [[ "$ddMode" == '0' ]] && { + echo -ne "\033[34mDo you want to install os manually?\033[0m\e[33m[\e[32my\e[33m/n]\e[0m " + read tmpinVNC + [[ -n "$inVNCtmp" ]] && inVNC="$tmpinVNC" + } + [ "$inVNC" == 'y' -o "$inVNC" == 'Y' ] && inVNC='y'; + [ "$inVNC" == 'n' -o "$inVNC" == 'N' ] && inVNC='n'; +} + +[ "$inVNC" == 'y' -o "$inVNC" == 'n' ] || ASKVNC; +[[ "$ddMode" == '0' ]] && { + [[ "$inVNC" == 'y' ]] && echo -e "\033[34mManual Mode\033[0m insatll [\033[33m$Relese\033[0m] [\033[33m$DIST\033[0m] [\033[33m$VER\033[0m] in VNC. " + [[ "$inVNC" == 'n' ]] && echo -e "\033[34mAuto Mode\033[0m insatll [\033[33m$Relese\033[0m] [\033[33m$DIST\033[0m] [\033[33m$VER\033[0m]. " +} +[[ "$ddMode" == '1' ]] && { + echo -ne "\033[34mAuto Mode\033[0m insatll \033[33mWindows\033[0m\n[\033[33m$DDURL\033[0m]\n" +} + +if [[ "$linux_relese" == 'centos' ]]; then + if [[ "$DIST" != "$UNVER" ]]; then + awk 'BEGIN{print '${UNVER}'-'${DIST}'}' |grep -q '^-' + if [ $? != '0' ]; then + UNKNOWHW='1'; + echo -en "\033[33mThe version lower then \033[31m$UNVER\033[33m may not support in auto mode! \033[0m\n"; + if [[ "$inVNC" == 'n' ]]; then + echo -en "\033[35mYou can connect VNC with \033[32mPublic IP\033[35m and port \033[32m1\033[35m/\033[32m5901\033[35m in vnc viewer.\033[0m\n" + read -n 1 -p "Press Enter to continue..." 
INP + [[ "$INP" != '' ]] && echo -ne '\b \n\n'; + fi + fi + awk 'BEGIN{print '${UNVER}'-'${DIST}'+0.59}' |grep -q '^-' + if [ $? == '0' ]; then + echo -en "\n\033[31mThe version higher then \033[33m6.10 \033[31mis not support in current! \033[0m\n\n" + exit 1; + fi + fi +fi + +echo -e "\n[\033[33m$Relese\033[0m] [\033[33m$DIST\033[0m] [\033[33m$VER\033[0m] Downloading..." + +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + inUpdate=''; [ "$linux_relese" == 'ubuntu' ] && inUpdate='-updates' + wget --no-check-certificate -qO '/boot/initrd.img' "${LinuxMirror}/dists/${DIST}${inUpdate}/main/installer-${VER}/current/images/netboot/${linux_relese}-installer/${VER}/initrd.gz" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'initrd.img' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + wget --no-check-certificate -qO '/boot/vmlinuz' "${LinuxMirror}/dists/${DIST}${inUpdate}/main/installer-${VER}/current/images/netboot/${linux_relese}-installer/${VER}/linux" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'vmlinuz' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + MirrorHost="$(echo "$LinuxMirror" |awk -F'://|/' '{print $2}')"; + MirrorFolder="$(echo "$LinuxMirror" |awk -F''${MirrorHost}'' '{print $2}')"; +elif [[ "$linux_relese" == 'centos' ]]; then + wget --no-check-certificate -qO '/boot/initrd.img' "${LinuxMirror}/${DIST}/os/${VER}/isolinux/initrd.img" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'initrd.img' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + wget --no-check-certificate -qO '/boot/vmlinuz' "${LinuxMirror}/${DIST}/os/${VER}/isolinux/vmlinuz" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'vmlinuz' for \033[33m$linux_relese\033[0m failed! 
\n" && exit 1 +else + bash $0 error; + exit 1; +fi +if [[ "$linux_relese" == 'debian' ]]; then + if [[ "$IncFirmware" == '1' ]]; then + wget --no-check-certificate -qO '/boot/firmware.cpio.gz' "http://cdimage.debian.org/cdimage/unofficial/non-free/firmware/${DIST}/current/firmware.cpio.gz" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'firmware' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + fi + if [[ "$ddMode" == '1' ]]; then + vKernel_udeb=$(wget --no-check-certificate -qO- "http://$DISTMirror/dists/$DIST/main/installer-$VER/current/images/udeb.list" |grep '^acpi-modules' |head -n1 |grep -o '[0-9]\{1,2\}.[0-9]\{1,2\}.[0-9]\{1,2\}-[0-9]\{1,2\}' |head -n1) + [[ -z "vKernel_udeb" ]] && vKernel_udeb="3.16.0-6" + fi +fi + +[[ "$setNet" == '1' ]] && { + IPv4="$ipAddr"; + MASK="$ipMask"; + GATE="$ipGate"; +} || { + DEFAULTNET="$(ip route show |grep -o 'default via [0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.*' |head -n1 |sed 's/proto.*\|onlink.*//g' |awk '{print $NF}')"; + [[ -n "$DEFAULTNET" ]] && IPSUB="$(ip addr |grep ''${DEFAULTNET}'' |grep 'global' |grep 'brd' |head -n1 |grep -o '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}/[0-9]\{1,2\}')"; + IPv4="$(echo -n "$IPSUB" |cut -d'/' -f1)"; + NETSUB="$(echo -n "$IPSUB" |grep -o '/[0-9]\{1,2\}')"; + GATE="$(ip route show |grep -o 'default via [0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}' |head -n1 |grep -o '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}')"; + [[ -n "$NETSUB" ]] && MASK="$(echo -n 
'128.0.0.0/1,192.0.0.0/2,224.0.0.0/3,240.0.0.0/4,248.0.0.0/5,252.0.0.0/6,254.0.0.0/7,255.0.0.0/8,255.128.0.0/9,255.192.0.0/10,255.224.0.0/11,255.240.0.0/12,255.248.0.0/13,255.252.0.0/14,255.254.0.0/15,255.255.0.0/16,255.255.128.0/17,255.255.192.0/18,255.255.224.0/19,255.255.240.0/20,255.255.248.0/21,255.255.252.0/22,255.255.254.0/23,255.255.255.0/24,255.255.255.128/25,255.255.255.192/26,255.255.255.224/27,255.255.255.240/28,255.255.255.248/29,255.255.255.252/30,255.255.255.254/31,255.255.255.255/32' |grep -o '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}'${NETSUB}'' |cut -d'/' -f1)"; +} + +[[ -n "$GATE" ]] && [[ -n "$MASK" ]] && [[ -n "$IPv4" ]] || { +echo "Not found \`ip command\`, It will use \`route command\`." +ipNum() { + local IFS='.'; + read ip1 ip2 ip3 ip4 <<<"$1"; + echo $((ip1*(1<<24)+ip2*(1<<16)+ip3*(1<<8)+ip4)); +} + +SelectMax(){ +ii=0; +for IPITEM in `route -n |awk -v OUT=$1 '{print $OUT}' |grep '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}'` + do + NumTMP="$(ipNum $IPITEM)"; + eval "arrayNum[$ii]='$NumTMP,$IPITEM'"; + ii=$[$ii+1]; + done +echo ${arrayNum[@]} |sed 's/\s/\n/g' |sort -n -k 1 -t ',' |tail -n1 |cut -d',' -f2; +} + +[[ -z $IPv4 ]] && IPv4="$(ifconfig |grep 'Bcast' |head -n1 |grep -o '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}' |head -n1)"; +[[ -z $GATE ]] && GATE="$(SelectMax 2)"; +[[ -z $MASK ]] && MASK="$(SelectMax 3)"; + +[[ -n "$GATE" ]] && [[ -n "$MASK" ]] && [[ -n "$IPv4" ]] || { + echo "Error! Not configure network. 
"; + exit 1; +} +} + +[[ "$setNet" != '1' ]] && [[ -f '/etc/network/interfaces' ]] && { + [[ -z "$(sed -n '/iface.*inet static/p' /etc/network/interfaces)" ]] && AutoNet='1' || AutoNet='0'; + [[ -d /etc/network/interfaces.d ]] && { + ICFGN="$(find /etc/network/interfaces.d -name '*.cfg' |wc -l)" || ICFGN='0'; + [[ "$ICFGN" -ne '0' ]] && { + for NetCFG in `ls -1 /etc/network/interfaces.d/*.cfg` + do + [[ -z "$(cat $NetCFG | sed -n '/iface.*inet static/p')" ]] && AutoNet='1' || AutoNet='0'; + [[ "$AutoNet" -eq '0' ]] && break; + done + } + } +} + +[[ "$setNet" != '1' ]] && [[ -d '/etc/sysconfig/network-scripts' ]] && { + ICFGN="$(find /etc/sysconfig/network-scripts -name 'ifcfg-*' |grep -v 'lo'|wc -l)" || ICFGN='0'; + [[ "$ICFGN" -ne '0' ]] && { + for NetCFG in `ls -1 /etc/sysconfig/network-scripts/ifcfg-* |grep -v 'lo$' |grep -v ':[0-9]\{1,\}'` + do + [[ -n "$(cat $NetCFG | sed -n '/BOOTPROTO.*[dD][hH][cC][pP]/p')" ]] && AutoNet='1' || { + AutoNet='0' && . $NetCFG; + [[ -n $NETMASK ]] && MASK="$NETMASK"; + [[ -n $GATEWAY ]] && GATE="$GATEWAY"; + } + [[ "$AutoNet" -eq '0' ]] && break; + done + } +} + +if [[ "$loaderMode" == "0" ]]; then + [[ ! -f $GRUBDIR/$GRUBFILE ]] && echo "Error! Not Found $GRUBFILE. " && exit 1; + + [[ ! 
-f $GRUBDIR/$GRUBFILE.old ]] && [[ -f $GRUBDIR/$GRUBFILE.bak ]] && mv -f $GRUBDIR/$GRUBFILE.bak $GRUBDIR/$GRUBFILE.old; + mv -f $GRUBDIR/$GRUBFILE $GRUBDIR/$GRUBFILE.bak; + [[ -f $GRUBDIR/$GRUBFILE.old ]] && cat $GRUBDIR/$GRUBFILE.old >$GRUBDIR/$GRUBFILE || cat $GRUBDIR/$GRUBFILE.bak >$GRUBDIR/$GRUBFILE; +else + GRUBVER='2' +fi + +[[ "$GRUBVER" == '0' ]] && { + READGRUB='/tmp/grub.read' + cat $GRUBDIR/$GRUBFILE |sed -n '1h;1!H;$g;s/\n/%%%%%%%/g;$p' |grep -om 1 'menuentry\ [^{]*{[^}]*}%%%%%%%' |sed 's/%%%%%%%/\n/g' >$READGRUB + LoadNum="$(cat $READGRUB |grep -c 'menuentry ')" + if [[ "$LoadNum" -eq '1' ]]; then + cat $READGRUB |sed '/^$/d' >/tmp/grub.new; + elif [[ "$LoadNum" -gt '1' ]]; then + CFG0="$(awk '/menuentry /{print NR}' $READGRUB|head -n 1)"; + CFG2="$(awk '/menuentry /{print NR}' $READGRUB|head -n 2 |tail -n 1)"; + CFG1=""; + for tmpCFG in `awk '/}/{print NR}' $READGRUB` + do + [ "$tmpCFG" -gt "$CFG0" -a "$tmpCFG" -lt "$CFG2" ] && CFG1="$tmpCFG"; + done + [[ -z "$CFG1" ]] && { + echo "Error! read $GRUBFILE. "; + exit 1; + } + + sed -n "$CFG0,$CFG1"p $READGRUB >/tmp/grub.new; + [[ -f /tmp/grub.new ]] && [[ "$(grep -c '{' /tmp/grub.new)" -eq "$(grep -c '}' /tmp/grub.new)" ]] || { + echo -ne "\033[31mError! \033[0mNot configure $GRUBFILE. \n"; + exit 1; + } + fi + [ ! -f /tmp/grub.new ] && echo "Error! $GRUBFILE. 
" && exit 1; + sed -i "/menuentry.*/c\menuentry\ \'Install OS \[$DIST\ $VER\]\'\ --class debian\ --class\ gnu-linux\ --class\ gnu\ --class\ os\ \{" /tmp/grub.new + sed -i "/echo.*Loading/d" /tmp/grub.new; + INSERTGRUB="$(awk '/menuentry /{print NR}' $GRUBDIR/$GRUBFILE|head -n 1)" +} + +[[ "$GRUBVER" == '1' ]] && { + CFG0="$(awk '/title[\ ]|title[\t]/{print NR}' $GRUBDIR/$GRUBFILE|head -n 1)"; + CFG1="$(awk '/title[\ ]|title[\t]/{print NR}' $GRUBDIR/$GRUBFILE|head -n 2 |tail -n 1)"; + [[ -n $CFG0 ]] && [ -z $CFG1 -o $CFG1 == $CFG0 ] && sed -n "$CFG0,$"p $GRUBDIR/$GRUBFILE >/tmp/grub.new; + [[ -n $CFG0 ]] && [ -z $CFG1 -o $CFG1 != $CFG0 ] && sed -n "$CFG0,$[$CFG1-1]"p $GRUBDIR/$GRUBFILE >/tmp/grub.new; + [[ ! -f /tmp/grub.new ]] && echo "Error! configure append $GRUBFILE. " && exit 1; + sed -i "/title.*/c\title\ \'Install OS \[$DIST\ $VER\]\'" /tmp/grub.new; + sed -i '/^#/d' /tmp/grub.new; + INSERTGRUB="$(awk '/title[\ ]|title[\t]/{print NR}' $GRUBDIR/$GRUBFILE|head -n 1)" +} + +if [[ "$loaderMode" == "0" ]]; then +[[ -n "$(grep 'linux.*/\|kernel.*/' /tmp/grub.new |awk '{print $2}' |tail -n 1 |grep '^/boot/')" ]] && Type='InBoot' || Type='NoBoot'; + +LinuxKernel="$(grep 'linux.*/\|kernel.*/' /tmp/grub.new |awk '{print $1}' |head -n 1)"; +[[ -z "$LinuxKernel" ]] && echo "Error! read grub config! 
" && exit 1; +LinuxIMG="$(grep 'initrd.*/' /tmp/grub.new |awk '{print $1}' |tail -n 1)"; +[ -z "$LinuxIMG" ] && sed -i "/$LinuxKernel.*\//a\\\tinitrd\ \/" /tmp/grub.new && LinuxIMG='initrd'; + +if [[ "$setInterfaceName" == "1" ]]; then + Add_OPTION="net.ifnames=0 biosdevname=0"; +else + Add_OPTION=""; +fi + +if [[ "$setIPv6" == "1" ]]; then + Add_OPTION="$Add_OPTION ipv6.disable=1"; +fi + +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + BOOT_OPTION="auto=true $Add_OPTION hostname=$linux_relese domain= -- quiet" +elif [[ "$linux_relese" == 'centos' ]]; then + BOOT_OPTION="ks=file://ks.cfg $Add_OPTION ksdevice=$IFETH" +fi + +[[ "$Type" == 'InBoot' ]] && { + sed -i "/$LinuxKernel.*\//c\\\t$LinuxKernel\\t\/boot\/vmlinuz $BOOT_OPTION" /tmp/grub.new; + sed -i "/$LinuxIMG.*\//c\\\t$LinuxIMG\\t\/boot\/initrd.img" /tmp/grub.new; +} + +[[ "$Type" == 'NoBoot' ]] && { + sed -i "/$LinuxKernel.*\//c\\\t$LinuxKernel\\t\/vmlinuz $BOOT_OPTION" /tmp/grub.new; + sed -i "/$LinuxIMG.*\//c\\\t$LinuxIMG\\t\/initrd.img" /tmp/grub.new; +} + +sed -i '$a\\n' /tmp/grub.new; +fi + +[[ "$inVNC" == 'n' ]] && { +GRUBPATCH='0'; + +if [[ "$loaderMode" == "0" ]]; then +[ -f '/etc/network/interfaces' -o -d '/etc/sysconfig/network-scripts' ] || { + echo "Error, Not found interfaces config."; + exit 1; +} + +sed -i ''${INSERTGRUB}'i\\n' $GRUBDIR/$GRUBFILE; +sed -i ''${INSERTGRUB}'r /tmp/grub.new' $GRUBDIR/$GRUBFILE; +[[ -f $GRUBDIR/grubenv ]] && sed -i 's/saved_entry/#saved_entry/g' $GRUBDIR/grubenv; +fi + +[[ -d /tmp/boot ]] && rm -rf /tmp/boot; +mkdir -p /tmp/boot; +cd /tmp/boot; +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + COMPTYPE="gzip"; +elif [[ "$linux_relese" == 'centos' ]]; then + COMPTYPE="$(file /boot/initrd.img |grep -o ':.*compressed data' |cut -d' ' -f2 |sed -r 's/(.*)/\L\1/' |head -n1)" + [[ -z "$COMPTYPE" ]] && echo "Detect compressed type fail." 
&& exit 1; +fi +CompDected='0' +for ListCOMP in `echo -en 'gzip\nlzma\nxz'` + do + if [[ "$COMPTYPE" == "$ListCOMP" ]]; then + CompDected='1' + if [[ "$COMPTYPE" == 'gzip' ]]; then + NewIMG="initrd.img.gz" + else + NewIMG="initrd.img.$COMPTYPE" + fi + mv -f "/boot/initrd.img" "/tmp/$NewIMG" + break; + fi + done +[[ "$CompDected" != '1' ]] && echo "Detect compressed type not support." && exit 1; +[[ "$COMPTYPE" == 'lzma' ]] && UNCOMP='xz --format=lzma --decompress'; +[[ "$COMPTYPE" == 'xz' ]] && UNCOMP='xz --decompress'; +[[ "$COMPTYPE" == 'gzip' ]] && UNCOMP='gzip -d'; + +$UNCOMP < /tmp/$NewIMG | cpio --extract --verbose --make-directories --no-absolute-filenames >>/dev/null 2>&1 + +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then +cat >/tmp/boot/preseed.cfg<>/dev/null 2>&1 +} + +[[ "$ddMode" == '1' ]] && { +WinNoDHCP(){ + echo -ne "for\0040\0057f\0040\0042tokens\00753\0052\0042\0040\0045\0045i\0040in\0040\0050\0047netsh\0040interface\0040show\0040interface\0040\0136\0174more\0040\00533\0040\0136\0174findstr\0040\0057I\0040\0057R\0040\0042本地\0056\0052\0040以太\0056\0052\0040Local\0056\0052\0040Ethernet\0042\0047\0051\0040do\0040\0050set\0040EthName\0075\0045\0045j\0051\r\nnetsh\0040\0055c\0040interface\0040ip\0040set\0040address\0040name\0075\0042\0045EthName\0045\0042\0040source\0075static\0040address\0075$IPv4\0040mask\0075$MASK\0040gateway\0075$GATE\r\nnetsh\0040\0055c\0040interface\0040ip\0040add\0040dnsservers\0040name\0075\0042\0045EthName\0045\0042\0040address\00758\00568\00568\00568\0040index\00751\0040validate\0075no\r\n\r\n" >>'/tmp/boot/net.tmp'; +} +WinRDP(){ + echo -ne 
"netsh\0040firewall\0040set\0040portopening\0040protocol\0075ALL\0040port\0075$WinRemote\0040name\0075RDP\0040mode\0075ENABLE\0040scope\0075ALL\0040profile\0075ALL\r\nnetsh\0040firewall\0040set\0040portopening\0040protocol\0075ALL\0040port\0075$WinRemote\0040name\0075RDP\0040mode\0075ENABLE\0040scope\0075ALL\0040profile\0075CURRENT\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Network\0134NewNetworkWindowOff\0042\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0042\0040\0057v\0040fDenyTSConnections\0040\0057t\0040reg\0137dword\0040\0057d\00400\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0134Wds\0134rdpwd\0134Tds\0134tcp\0042\0040\0057v\0040PortNumber\0040\0057t\0040reg\0137dword\0040\0057d\0040$WinRemote\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0134WinStations\0134RDP\0055Tcp\0042\0040\0057v\0040PortNumber\0040\0057t\0040reg\0137dword\0040\0057d\0040$WinRemote\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0134WinStations\0134RDP\0055Tcp\0042\0040\0057v\0040UserAuthentication\0040\0057t\0040reg\0137dword\0040\0057d\00400\0040\0057f\r\nFOR\0040\0057F\0040\0042tokens\00752\0040delims\0075\0072\0042\0040\0045\0045i\0040in\0040\0050\0047SC\0040QUERYEX\0040TermService\0040\0136\0174FINDSTR\0040\0057I\0040\0042PID\0042\0047\0051\0040do\0040TASKKILL\0040\0057F\0040\0057PID\0040\0045\0045i\r\nFOR\0040\0057F\0040\0042tokens\00752\0040delims\0075\0072\0042\0040\0045\0045i\0040in\0040\0050\0047SC\0040QUERYEX\0040UmRdpService\0040\0136\0174FINDSTR\0040\0057I\0040\0042PID\0042\0047\0051\0040do\0040TASKKILL\0040\0057F\0040\0057PID\0040\0045\0045i\r\nSC\0040START\0040TermService\r\n\r\n" >>'/tmp/boot/net.tmp'; +} + echo -ne 
"\0100ECHO\0040OFF\r\n\r\ncd\0056\0076\0045WINDIR\0045\0134GetAdmin\r\nif\0040exist\0040\0045WINDIR\0045\0134GetAdmin\0040\0050del\0040\0057f\0040\0057q\0040\0042\0045WINDIR\0045\0134GetAdmin\0042\0051\0040else\0040\0050\r\necho\0040CreateObject\0136\0050\0042Shell\0056Application\0042\0136\0051\0056ShellExecute\0040\0042\0045\0176s0\0042\0054\0040\0042\0045\0052\0042\0054\0040\0042\0042\0054\0040\0042runas\0042\0054\00401\0040\0076\0076\0040\0042\0045temp\0045\0134Admin\0056vbs\0042\r\n\0042\0045temp\0045\0134Admin\0056vbs\0042\r\ndel\0040\0057f\0040\0057q\0040\0042\0045temp\0045\0134Admin\0056vbs\0042\r\nexit\0040\0057b\00402\0051\r\n\r\n" >'/tmp/boot/net.tmp'; + [[ "$setNet" == '1' ]] && WinNoDHCP; + [[ "$setNet" == '0' ]] && [[ "$AutoNet" == '0' ]] && WinNoDHCP; + [[ "$setRDP" == '1' ]] && [[ -n "$WinRemote" ]] && WinRDP + echo -ne "ECHO\0040SELECT\0040VOLUME\0075\0045\0045SystemDrive\0045\0045\0040\0076\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\nECHO\0040EXTEND\0040\0076\0076\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\nSTART\0040/WAIT\0040DISKPART\0040\0057S\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\nDEL\0040\0057f\0040\0057q\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\n\r\n" >>'/tmp/boot/net.tmp'; + echo -ne "cd\0040\0057d\0040\0042\0045ProgramData\0045\0057Microsoft\0057Windows\0057Start\0040Menu\0057Programs\0057Startup\0042\r\ndel\0040\0057f\0040\0057q\0040net\0056bat\r\n\r\n\r\n" >>'/tmp/boot/net.tmp'; + iconv -f 'UTF-8' -t 'GBK' '/tmp/boot/net.tmp' -o '/tmp/boot/net.bat' + rm -rf '/tmp/boot/net.tmp' + echo "$DDURL" |grep -q '^https://' + [[ $? -eq '0' ]] && { + echo -ne '\nAdd ssl support...\n' + [[ -n $SSL_SUPPORT ]] && { + wget --no-check-certificate -qO- "$SSL_SUPPORT" |tar -x + [[ ! -f /tmp/boot/usr/bin/wget ]] && echo 'Error! SSL_SUPPORT.' 
&& exit 1; + sed -i 's/wget\ -qO-/\/usr\/bin\/wget\ --no-check-certificate\ --retry-connrefused\ --tries=7\ --continue\ -qO-/g' /tmp/boot/preseed.cfg + [[ $? -eq '0' ]] && echo -ne 'Success! \n\n' + } || { + echo -ne 'Not ssl support package! \n\n'; + exit 1; + } + } +} + +[[ "$ddMode" == '0' ]] && { + sed -i '/anna-install/d' /tmp/boot/preseed.cfg + sed -i 's/wget.*\/sbin\/reboot\;\ //g' /tmp/boot/preseed.cfg +} + +elif [[ "$linux_relese" == 'centos' ]]; then +cat >/tmp/boot/ks.cfg< /boot/initrd.img; +rm -rf /tmp/boot; +} + +[[ "$inVNC" == 'y' ]] && { + sed -i '$i\\n' $GRUBDIR/$GRUBFILE + sed -i '$r /tmp/grub.new' $GRUBDIR/$GRUBFILE + echo -e "\n\033[33m\033[04mIt will reboot! \nPlease connect VNC! \nSelect\033[0m\033[32m Install OS [$DIST $VER] \033[33m\033[4mto install system.\033[04m\n\n\033[31m\033[04mThere is some information for you.\nDO NOT CLOSE THE WINDOW! \033[0m\n" + echo -e "\033[35mIPv4\t\tNETMASK\t\tGATEWAY\033[0m" + echo -e "\033[36m\033[04m$IPv4\033[0m\t\033[36m\033[04m$MASK\033[0m\t\033[36m\033[04m$GATE\033[0m\n\n" + + read -n 1 -p "Press Enter to reboot..." INP + [[ "$INP" != '' ]] && echo -ne '\b \n\n'; +} + +chown root:root $GRUBDIR/$GRUBFILE +chmod 444 $GRUBDIR/$GRUBFILE + +if [[ "$loaderMode" == "0" ]]; then + sleep 3 && reboot >/dev/null 2>&1 +else + rm -rf "$HOME/loader" + mkdir -p "$HOME/loader" + cp -rf "/boot/initrd.img" "$HOME/loader/initrd.img" + cp -rf "/boot/vmlinuz" "$HOME/loader/vmlinuz" + [[ -f "/boot/initrd.img" ]] && rm -rf "/boot/initrd.img" + [[ -f "/boot/vmlinuz" ]] && rm -rf "/boot/vmlinuz" + echo && ls -AR1 "$HOME/loader" +fi diff --git a/linux/InstallNET.sh b/linux/InstallNET.sh new file mode 100644 index 0000000..7c2a9ed --- /dev/null +++ b/linux/InstallNET.sh @@ -0,0 +1,777 @@ +#!/bin/bash + +## License: GPL +## It can reinstall Debian, Ubuntu, CentOS system with network. 
+## Default root password: MoeClub.org +## Blog: https://moeclub.org +## Written By MoeClub.org + +export tmpVER='' +export tmpDIST='' +export tmpURL='' +export tmpWORD='' +export tmpMirror='' +export ipAddr='' +export ipMask='' +export ipGate='' +export ipDNS='8.8.8.8' +export IncDisk='default' +export interface='' +export interfaceSelect='' +export Relese='' +export sshPORT='22' +export ddMode='0' +export setNet='0' +export setRDP='0' +export setIPv6='0' +export isMirror='0' +export FindDists='0' +export loaderMode='0' +export IncFirmware='0' +export SpikCheckDIST='0' +export setInterfaceName='0' +export UNKNOWHW='0' +export UNVER='6.4' +export GRUBDIR='' +export GRUBFILE='' +export GRUBVER='' +export VER='' + +while [[ $# -ge 1 ]]; do + case $1 in + -v|--ver) + shift + tmpVER="$1" + shift + ;; + -d|--debian) + shift + Relese='Debian' + tmpDIST="$1" + shift + ;; + -u|--ubuntu) + shift + Relese='Ubuntu' + tmpDIST="$1" + shift + ;; + -c|--centos) + shift + Relese='CentOS' + tmpDIST="$1" + shift + ;; + -dd|--image) + shift + ddMode='1' + tmpURL="$1" + shift + ;; + -p|--password) + shift + tmpWORD="$1" + shift + ;; + -i|--interface) + shift + interfaceSelect="$1" + shift + ;; + --ip-addr) + shift + ipAddr="$1" + shift + ;; + --ip-mask) + shift + ipMask="$1" + shift + ;; + --ip-gate) + shift + ipGate="$1" + shift + ;; + --ip-dns) + shift + ipDNS="$1" + shift + ;; + --dev-net) + shift + setInterfaceName='1' + ;; + --loader) + shift + loaderMode='1' + ;; + -apt|-yum|--mirror) + shift + isMirror='1' + tmpMirror="$1" + shift + ;; + -rdp) + shift + setRDP='1' + WinRemote="$1" + shift + ;; + -firmware) + shift + IncFirmware="1" + ;; + -port) + shift + sshPORT="$1" + shift + ;; + --noipv6) + shift + setIPv6='1' + ;; + -a|--auto|-m|--manual|-ssl) + shift + ;; + *) + if [[ "$1" != 'error' ]]; then echo -ne "\nInvaild option: '$1'\n\n"; fi + echo -ne " Usage:\n\tbash $(basename $0)\t-d/--debian [\033[33m\033[04mdists-name\033[0m]\n\t\t\t\t-u/--ubuntu 
[\033[04mdists-name\033[0m]\n\t\t\t\t-c/--centos [\033[04mdists-name\033[0m]\n\t\t\t\t-v/--ver [32/i386|64/\033[33m\033[04mamd64\033[0m] [\033[33m\033[04mdists-verison\033[0m]\n\t\t\t\t--ip-addr/--ip-gate/--ip-mask\n\t\t\t\t-apt/-yum/--mirror\n\t\t\t\t-dd/--image\n\t\t\t\t-p [linux password]\n\t\t\t\t-port [linux ssh port]\n" + exit 1; + ;; + esac + done + +[[ "$EUID" -ne '0' ]] && echo "Error:This script must be run as root!" && exit 1; + +function dependence(){ + Full='0'; + for BIN_DEP in `echo "$1" |sed 's/,/\n/g'` + do + if [[ -n "$BIN_DEP" ]]; then + Found='0'; + for BIN_PATH in `echo "$PATH" |sed 's/:/\n/g'` + do + ls $BIN_PATH/$BIN_DEP >/dev/null 2>&1; + if [ $? == '0' ]; then + Found='1'; + break; + fi + done + if [ "$Found" == '1' ]; then + echo -en "[\033[32mok\033[0m]\t"; + else + Full='1'; + echo -en "[\033[31mNot Install\033[0m]"; + fi + echo -en "\t$BIN_DEP\n"; + fi + done + if [ "$Full" == '1' ]; then + echo -ne "\n\033[31mError! \033[0mPlease use '\033[33mapt-get\033[0m' or '\033[33myum\033[0m' install it.\n\n\n" + exit 1; + fi +} + +function selectMirror(){ + [ $# -ge 3 ] || exit 1 + Relese=$(echo "$1" |sed -r 's/(.*)/\L\1/') + DIST=$(echo "$2" |sed 's/\ //g' |sed -r 's/(.*)/\L\1/') + VER=$(echo "$3" |sed 's/\ //g' |sed -r 's/(.*)/\L\1/') + New=$(echo "$4" |sed 's/\ //g') + [ -n "$Relese" ] && [ -n "$DIST" ] && [ -n "$VER" ] || exit 1 + if [ "$Relese" == "debian" ] || [ "$Relese" == "ubuntu" ]; then + [ "$DIST" == "focal" ] && legacy="legacy-" || legacy="" + TEMP="SUB_MIRROR/dists/${DIST}/main/installer-${VER}/current/${legacy}images/netboot/${Relese}-installer/${VER}/initrd.gz" + elif [ "$Relese" == "centos" ]; then + TEMP="SUB_MIRROR/${DIST}/os/${VER}/isolinux/initrd.img" + fi + [ -n "$TEMP" ] || exit 1 + mirrorStatus=0 + declare -A MirrorBackup + MirrorBackup=(["debian0"]="" ["debian1"]="http://deb.debian.org/debian" ["debian2"]="http://archive.debian.org/debian" ["ubuntu0"]="" ["ubuntu1"]="http://archive.ubuntu.com/ubuntu" 
["ubuntu2"]="http://ports.ubuntu.com" ["centos0"]="" ["centos1"]="http://mirror.centos.org/centos" ["centos2"]="http://vault.centos.org") + echo "$New" |grep -q '^http://\|^https://\|^ftp://' && MirrorBackup[${Relese}0]="$New" + for mirror in $(echo "${!MirrorBackup[@]}" |sed 's/\ /\n/g' |sort -n |grep "^$Relese") + do + Current="${MirrorBackup[$mirror]}" + [ -n "$Current" ] || continue + MirrorURL=`echo "$TEMP" |sed "s#SUB_MIRROR#${Current}#g"` + wget --no-check-certificate --spider --timeout=3 -o /dev/null "$MirrorURL" + [ $? -eq 0 ] && mirrorStatus=1 && break + done + [ $mirrorStatus -eq 1 ] && echo "$Current" || exit 1 +} + +function netmask() { + n="${1:-32}" + b="" + m="" + for((i=0;i<32;i++)){ + [ $i -lt $n ] && b="${b}1" || b="${b}0" + } + for((i=0;i<4;i++)){ + s=`echo "$b"|cut -c$[$[$i*8]+1]-$[$[$i+1]*8]` + [ "$m" == "" ] && m="$((2#${s}))" || m="${m}.$((2#${s}))" + } + echo "$m" +} + +function getInterface(){ + interface="" + Interfaces=`cat /proc/net/dev |grep ':' |cut -d':' -f1 |sed 's/\s//g' |grep -iv '^lo\|^sit\|^stf\|^gif\|^dummy\|^vmnet\|^vir\|^gre\|^ipip\|^ppp\|^bond\|^tun\|^tap\|^ip6gre\|^ip6tnl\|^teql\|^ocserv\|^vpn'` + defaultRoute=`ip route show default |grep "^default"` + for item in `echo "$Interfaces"` + do + [ -n "$item" ] || continue + echo "$defaultRoute" |grep -q "$item" + [ $? -eq 0 ] && interface="$item" && break + done + echo "$interface" +} + +function getDisk(){ + disks=`lsblk | sed 's/[[:space:]]*$//g' |grep "disk$" |cut -d' ' -f1 |grep -v "fd[0-9]*\|sr[0-9]*" |head -n1` + [ -n "$disks" ] || echo "" + echo "$disks" |grep -q "/dev" + [ $? 
-eq 0 ] && echo "$disks" || echo "/dev/$disks" +} + +function diskType(){ + echo `udevadm info --query all "$1" 2>/dev/null |grep 'ID_PART_TABLE_TYPE' |cut -d'=' -f2` +} + +function getGrub(){ + Boot="${1:-/boot}" + folder=`find "$Boot" -type d -name "grub*" 2>/dev/null |head -n1` + [ -n "$folder" ] || return + fileName=`ls -1 "$folder" 2>/dev/null |grep '^grub.conf$\|^grub.cfg$'` + if [ -z "$fileName" ]; then + ls -1 "$folder" 2>/dev/null |grep -q '^grubenv$' + [ $? -eq 0 ] || return + folder=`find "$Boot" -type f -name "grubenv" 2>/dev/null |xargs dirname |grep -v "^$folder" |head -n1` + [ -n "$folder" ] || return + fileName=`ls -1 "$folder" 2>/dev/null |grep '^grub.conf$\|^grub.cfg$'` + fi + [ -n "$fileName" ] || return + [ "$fileName" == "grub.cfg" ] && ver="0" || ver="1" + echo "${folder}:${fileName}:${ver}" +} + +function lowMem(){ + mem=`grep "^MemTotal:" /proc/meminfo 2>/dev/null |grep -o "[0-9]*"` + [ -n "$mem" ] || return 0 + [ "$mem" -le "524288" ] && return 1 || return 0 +} + +if [[ "$loaderMode" == "0" ]]; then + Grub=`getGrub "/boot"` + [ -z "$Grub" ] && echo -ne "Error! 
Not Found grub.\n" && exit 1; + GRUBDIR=`echo "$Grub" |cut -d':' -f1` + GRUBFILE=`echo "$Grub" |cut -d':' -f2` + GRUBVER=`echo "$Grub" |cut -d':' -f3` +fi + +[ -n "$Relese" ] || Relese='Debian' +linux_relese=$(echo "$Relese" |sed 's/\ //g' |sed -r 's/(.*)/\L\1/') +clear && echo -e "\n\033[36m# Check Dependence\033[0m\n" + +if [[ "$ddMode" == '1' ]]; then + dependence iconv; + linux_relese='debian'; + tmpDIST='bullseye'; + tmpVER='amd64'; +fi + +[ -n "$ipAddr" ] && [ -n "$ipMask" ] && [ -n "$ipGate" ] && setNet='1'; +if [ "$setNet" == "0" ]; then + dependence ip + [ -n "$interface" ] || interface=`getInterface` + iAddr=`ip addr show dev $interface |grep "inet.*" |head -n1 |grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\/[0-9]\{1,2\}'` + ipAddr=`echo ${iAddr} |cut -d'/' -f1` + ipMask=`netmask $(echo ${iAddr} |cut -d'/' -f2)` + ipGate=`ip route show default |grep "^default" |grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}' |head -n1` +fi +if [ -z "$interface" ]; then + dependence ip + [ -n "$interface" ] || interface=`getInterface` +fi +IPv4="$ipAddr"; MASK="$ipMask"; GATE="$ipGate"; + +[ -n "$IPv4" ] && [ -n "$MASK" ] && [ -n "$GATE" ] && [ -n "$ipDNS" ] || { + echo -ne '\nError: Invalid network config\n\n' + bash $0 error; + exit 1; +} + +if [[ "$Relese" == 'Debian' ]] || [[ "$Relese" == 'Ubuntu' ]]; then + dependence wget,awk,grep,sed,cut,cat,lsblk,cpio,gzip,find,dirname,basename; +elif [[ "$Relese" == 'CentOS' ]]; then + dependence wget,awk,grep,sed,cut,cat,lsblk,cpio,gzip,find,dirname,basename,file,xz; +fi +[ -n "$tmpWORD" ] && dependence openssl +[[ -n "$tmpWORD" ]] && myPASSWORD="$(openssl passwd -1 "$tmpWORD")"; +[[ -z "$myPASSWORD" ]] && myPASSWORD='$1$4BJZaD0A$y1QykUnJ6mXprENfwpseH0'; + +tempDisk=`getDisk`; [ -n "$tempDisk" ] && IncDisk="$tempDisk" + +case `uname -m` in aarch64|arm64) VER="arm64";; x86|i386|i686) VER="i386";; x86_64|amd64) VER="amd64";; *) VER="";; esac +tmpVER="$(echo "$tmpVER" |sed -r 's/(.*)/\L\1/')"; +if 
[[ "$VER" != "arm64" ]] && [[ -n "$tmpVER" ]]; then + case "$tmpVER" in i386|i686|x86|32) VER="i386";; amd64|x86_64|x64|64) [[ "$Relese" == 'CentOS' ]] && VER='x86_64' || VER='amd64';; *) VER='';; esac +fi + +if [[ ! -n "$VER" ]]; then + echo "Error! Not Architecture." + bash $0 error; + exit 1; +fi + +if [[ -z "$tmpDIST" ]]; then + [ "$Relese" == 'Debian' ] && tmpDIST='buster'; + [ "$Relese" == 'Ubuntu' ] && tmpDIST='bionic'; + [ "$Relese" == 'CentOS' ] && tmpDIST='6.10'; +fi + +if [[ -n "$tmpDIST" ]]; then + if [[ "$Relese" == 'Debian' ]]; then + SpikCheckDIST='0' + DIST="$(echo "$tmpDIST" |sed -r 's/(.*)/\L\1/')"; + echo "$DIST" |grep -q '[0-9]'; + [[ $? -eq '0' ]] && { + isDigital="$(echo "$DIST" |grep -o '[\.0-9]\{1,\}' |sed -n '1h;1!H;$g;s/\n//g;$p' |cut -d'.' -f1)"; + [[ -n $isDigital ]] && { + [[ "$isDigital" == '7' ]] && DIST='wheezy'; + [[ "$isDigital" == '8' ]] && DIST='jessie'; + [[ "$isDigital" == '9' ]] && DIST='stretch'; + [[ "$isDigital" == '10' ]] && DIST='buster'; + [[ "$isDigital" == '11' ]] && DIST='bullseye'; + } + } + LinuxMirror=$(selectMirror "$Relese" "$DIST" "$VER" "$tmpMirror") + fi + if [[ "$Relese" == 'Ubuntu' ]]; then + SpikCheckDIST='0' + DIST="$(echo "$tmpDIST" |sed -r 's/(.*)/\L\1/')"; + echo "$DIST" |grep -q '[0-9]'; + [[ $? 
-eq '0' ]] && { + isDigital="$(echo "$DIST" |grep -o '[\.0-9]\{1,\}' |sed -n '1h;1!H;$g;s/\n//g;$p')"; + [[ -n $isDigital ]] && { + [[ "$isDigital" == '12.04' ]] && DIST='precise'; + [[ "$isDigital" == '14.04' ]] && DIST='trusty'; + [[ "$isDigital" == '16.04' ]] && DIST='xenial'; + [[ "$isDigital" == '18.04' ]] && DIST='bionic'; + [[ "$isDigital" == '20.04' ]] && DIST='focal'; + } + } + LinuxMirror=$(selectMirror "$Relese" "$DIST" "$VER" "$tmpMirror") + fi + if [[ "$Relese" == 'CentOS' ]]; then + SpikCheckDIST='1' + DISTCheck="$(echo "$tmpDIST" |grep -o '[\.0-9]\{1,\}' |head -n1)"; + LinuxMirror=$(selectMirror "$Relese" "$DISTCheck" "$VER" "$tmpMirror") + ListDIST="$(wget --no-check-certificate -qO- "$LinuxMirror/dir_sizes" |cut -f2 |grep '^[0-9]')" + DIST="$(echo "$ListDIST" |grep "^$DISTCheck" |head -n1)" + [[ -z "$DIST" ]] && { + echo -ne '\nThe dists version not found in this mirror, Please check it! \n\n' + bash $0 error; + exit 1; + } + wget --no-check-certificate -qO- "$LinuxMirror/$DIST/os/$VER/.treeinfo" |grep -q 'general'; + [[ $? != '0' ]] && { + echo -ne "\nThe version not found in this mirror, Please change mirror try again! \n\n"; + exit 1; + } + fi +fi + +if [[ -z "$LinuxMirror" ]]; then + echo -ne "\033[31mError! \033[0mInvaild mirror! 
\n" + [ "$Relese" == 'Debian' ] && echo -en "\033[33mexample:\033[0m http://deb.debian.org/debian\n\n"; + [ "$Relese" == 'Ubuntu' ] && echo -en "\033[33mexample:\033[0m http://archive.ubuntu.com/ubuntu\n\n"; + [ "$Relese" == 'CentOS' ] && echo -en "\033[33mexample:\033[0m http://mirror.centos.org/centos\n\n"; + bash $0 error; + exit 1; +fi + +if [[ "$SpikCheckDIST" == '0' ]]; then + DistsList="$(wget --no-check-certificate -qO- "$LinuxMirror/dists/" |grep -o 'href=.*/"' |cut -d'"' -f2 |sed '/-\|old\|Debian\|experimental\|stable\|test\|sid\|devel/d' |grep '^[^/]' |sed -n '1h;1!H;$g;s/\n//g;s/\//\;/g;$p')"; + for CheckDEB in `echo "$DistsList" |sed 's/;/\n/g'` + do + [[ "$CheckDEB" == "$DIST" ]] && FindDists='1' && break; + done + [[ "$FindDists" == '0' ]] && { + echo -ne '\nThe dists version not found, Please check it! \n\n' + bash $0 error; + exit 1; + } +fi + +if [[ "$ddMode" == '1' ]]; then + if [[ -n "$tmpURL" ]]; then + DDURL="$tmpURL" + echo "$DDURL" |grep -q '^http://\|^ftp://\|^https://'; + [[ $? -ne '0' ]] && echo 'Please input vaild URL,Only support http://, ftp:// and https:// !' && exit 1; + else + echo 'Please input vaild image URL! '; + exit 1; + fi +fi + +clear && echo -e "\n\033[36m# Install\033[0m\n" + +[[ "$ddMode" == '1' ]] && echo -ne "\033[34mAuto Mode\033[0m insatll \033[33mWindows\033[0m\n[\033[33m$DDURL\033[0m]\n" + +if [ -z "$interfaceSelect" ]; then + if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + interfaceSelect="auto" + elif [[ "$linux_relese" == 'centos' ]]; then + interfaceSelect="link" + fi +fi + +if [[ "$linux_relese" == 'centos' ]]; then + if [[ "$DIST" != "$UNVER" ]]; then + awk 'BEGIN{print '${UNVER}'-'${DIST}'}' |grep -q '^-' + if [ $? != '0' ]; then + UNKNOWHW='1'; + echo -en "\033[33mThe version lower then \033[31m$UNVER\033[33m may not support in auto mode! \033[0m\n"; + fi + awk 'BEGIN{print '${UNVER}'-'${DIST}'+0.59}' |grep -q '^-' + if [ $? 
== '0' ]; then + echo -en "\n\033[31mThe version higher then \033[33m6.10 \033[31mis not support in current! \033[0m\n\n" + exit 1; + fi + fi +fi + +echo -e "\n[\033[33m$Relese\033[0m] [\033[33m$DIST\033[0m] [\033[33m$VER\033[0m] Downloading..." + +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + [ "$DIST" == "focal" ] && legacy="legacy-" || legacy="" + wget --no-check-certificate -qO '/tmp/initrd.img' "${LinuxMirror}/dists/${DIST}/main/installer-${VER}/current/${legacy}images/netboot/${linux_relese}-installer/${VER}/initrd.gz" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'initrd.img' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + wget --no-check-certificate -qO '/tmp/vmlinuz' "${LinuxMirror}/dists/${DIST}${inUpdate}/main/installer-${VER}/current/${legacy}images/netboot/${linux_relese}-installer/${VER}/linux" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'vmlinuz' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + MirrorHost="$(echo "$LinuxMirror" |awk -F'://|/' '{print $2}')"; + MirrorFolder="$(echo "$LinuxMirror" |awk -F''${MirrorHost}'' '{print $2}')"; + [ -n "$MirrorFolder" ] || MirrorFolder="/" +elif [[ "$linux_relese" == 'centos' ]]; then + wget --no-check-certificate -qO '/tmp/initrd.img' "${LinuxMirror}/${DIST}/os/${VER}/isolinux/initrd.img" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'initrd.img' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + wget --no-check-certificate -qO '/tmp/vmlinuz' "${LinuxMirror}/${DIST}/os/${VER}/isolinux/vmlinuz" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'vmlinuz' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 +else + bash $0 error; + exit 1; +fi +if [[ "$linux_relese" == 'debian' ]]; then + if [[ "$IncFirmware" == '1' ]]; then + wget --no-check-certificate -qO '/tmp/firmware.cpio.gz' "http://cdimage.debian.org/cdimage/unofficial/non-free/firmware/${DIST}/current/firmware.cpio.gz" + [[ $? 
-ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'firmware' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + fi + if [[ "$ddMode" == '1' ]]; then + vKernel_udeb=$(wget --no-check-certificate -qO- "http://$DISTMirror/dists/$DIST/main/installer-$VER/current/images/udeb.list" |grep '^acpi-modules' |head -n1 |grep -o '[0-9]\{1,2\}.[0-9]\{1,2\}.[0-9]\{1,2\}-[0-9]\{1,2\}' |head -n1) + [[ -z "vKernel_udeb" ]] && vKernel_udeb="4.19.0-17" + fi +fi + +if [[ "$loaderMode" == "0" ]]; then + [[ ! -f "${GRUBDIR}/${GRUBFILE}" ]] && echo "Error! Not Found ${GRUBFILE}. " && exit 1; + + [[ ! -f "${GRUBDIR}/${GRUBFILE}.old" ]] && [[ -f "${GRUBDIR}/${GRUBFILE}.bak" ]] && mv -f "${GRUBDIR}/${GRUBFILE}.bak" "${GRUBDIR}/${GRUBFILE}.old"; + mv -f "${GRUBDIR}/${GRUBFILE}" "${GRUBDIR}/${GRUBFILE}.bak"; + [[ -f "${GRUBDIR}/${GRUBFILE}.old" ]] && cat "${GRUBDIR}/${GRUBFILE}.old" >"${GRUBDIR}/${GRUBFILE}" || cat "${GRUBDIR}/${GRUBFILE}.bak" >"${GRUBDIR}/${GRUBFILE}"; +else + GRUBVER='-1' +fi + +[[ "$GRUBVER" == '0' ]] && { + READGRUB='/tmp/grub.read' + cat $GRUBDIR/$GRUBFILE |sed -n '1h;1!H;$g;s/\n/%%%%%%%/g;$p' |grep -om 1 'menuentry\ [^{]*{[^}]*}%%%%%%%' |sed 's/%%%%%%%/\n/g' >$READGRUB + LoadNum="$(cat $READGRUB |grep -c 'menuentry ')" + if [[ "$LoadNum" -eq '1' ]]; then + cat $READGRUB |sed '/^$/d' >/tmp/grub.new; + elif [[ "$LoadNum" -gt '1' ]]; then + CFG0="$(awk '/menuentry /{print NR}' $READGRUB|head -n 1)"; + CFG2="$(awk '/menuentry /{print NR}' $READGRUB|head -n 2 |tail -n 1)"; + CFG1=""; + for tmpCFG in `awk '/}/{print NR}' $READGRUB` + do + [ "$tmpCFG" -gt "$CFG0" -a "$tmpCFG" -lt "$CFG2" ] && CFG1="$tmpCFG"; + done + [[ -z "$CFG1" ]] && { + echo "Error! read $GRUBFILE. "; + exit 1; + } + + sed -n "$CFG0,$CFG1"p $READGRUB >/tmp/grub.new; + [[ -f /tmp/grub.new ]] && [[ "$(grep -c '{' /tmp/grub.new)" -eq "$(grep -c '}' /tmp/grub.new)" ]] || { + echo -ne "\033[31mError! \033[0mNot configure $GRUBFILE. \n"; + exit 1; + } + fi + [ ! -f /tmp/grub.new ] && echo "Error! 
$GRUBFILE. " && exit 1; + sed -i "/menuentry.*/c\menuentry\ \'Install OS \[$DIST\ $VER\]\'\ --class debian\ --class\ gnu-linux\ --class\ gnu\ --class\ os\ \{" /tmp/grub.new + sed -i "/echo.*Loading/d" /tmp/grub.new; + INSERTGRUB="$(awk '/menuentry /{print NR}' $GRUBDIR/$GRUBFILE|head -n 1)" +} + +[[ "$GRUBVER" == '1' ]] && { + CFG0="$(awk '/title[\ ]|title[\t]/{print NR}' $GRUBDIR/$GRUBFILE|head -n 1)"; + CFG1="$(awk '/title[\ ]|title[\t]/{print NR}' $GRUBDIR/$GRUBFILE|head -n 2 |tail -n 1)"; + [[ -n $CFG0 ]] && [ -z $CFG1 -o $CFG1 == $CFG0 ] && sed -n "$CFG0,$"p $GRUBDIR/$GRUBFILE >/tmp/grub.new; + [[ -n $CFG0 ]] && [ -z $CFG1 -o $CFG1 != $CFG0 ] && sed -n "$CFG0,$[$CFG1-1]"p $GRUBDIR/$GRUBFILE >/tmp/grub.new; + [[ ! -f /tmp/grub.new ]] && echo "Error! configure append $GRUBFILE. " && exit 1; + sed -i "/title.*/c\title\ \'Install OS \[$DIST\ $VER\]\'" /tmp/grub.new; + sed -i '/^#/d' /tmp/grub.new; + INSERTGRUB="$(awk '/title[\ ]|title[\t]/{print NR}' $GRUBDIR/$GRUBFILE|head -n 1)" +} + +if [[ "$loaderMode" == "0" ]]; then + [[ -n "$(grep 'linux.*/\|kernel.*/' /tmp/grub.new |awk '{print $2}' |tail -n 1 |grep '^/boot/')" ]] && Type='InBoot' || Type='NoBoot'; + + LinuxKernel="$(grep 'linux.*/\|kernel.*/' /tmp/grub.new |awk '{print $1}' |head -n 1)"; + [[ -z "$LinuxKernel" ]] && echo "Error! read grub config! 
" && exit 1; + LinuxIMG="$(grep 'initrd.*/' /tmp/grub.new |awk '{print $1}' |tail -n 1)"; + [ -z "$LinuxIMG" ] && sed -i "/$LinuxKernel.*\//a\\\tinitrd\ \/" /tmp/grub.new && LinuxIMG='initrd'; + + [[ "$setInterfaceName" == "1" ]] && Add_OPTION="net.ifnames=0 biosdevname=0" || Add_OPTION="" + [[ "$setIPv6" == "1" ]] && Add_OPTION="$Add_OPTION ipv6.disable=1" + + lowMem || Add_OPTION="$Add_OPTION lowmem/low=1" + + if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + BOOT_OPTION="auto=true $Add_OPTION hostname=$linux_relese domain= -- quiet" + elif [[ "$linux_relese" == 'centos' ]]; then + BOOT_OPTION="ks=file://ks.cfg $Add_OPTION ksdevice=$interfaceSelect" + fi + + [[ "$Type" == 'InBoot' ]] && { + sed -i "/$LinuxKernel.*\//c\\\t$LinuxKernel\\t\/boot\/vmlinuz $BOOT_OPTION" /tmp/grub.new; + sed -i "/$LinuxIMG.*\//c\\\t$LinuxIMG\\t\/boot\/initrd.img" /tmp/grub.new; + } + + [[ "$Type" == 'NoBoot' ]] && { + sed -i "/$LinuxKernel.*\//c\\\t$LinuxKernel\\t\/vmlinuz $BOOT_OPTION" /tmp/grub.new; + sed -i "/$LinuxIMG.*\//c\\\t$LinuxIMG\\t\/initrd.img" /tmp/grub.new; + } + + sed -i '$a\\n' /tmp/grub.new; + + sed -i ''${INSERTGRUB}'i\\n' $GRUBDIR/$GRUBFILE; + sed -i ''${INSERTGRUB}'r /tmp/grub.new' $GRUBDIR/$GRUBFILE; + [[ -f $GRUBDIR/grubenv ]] && sed -i 's/saved_entry/#saved_entry/g' $GRUBDIR/grubenv; +fi + +[[ -d /tmp/boot ]] && rm -rf /tmp/boot; +mkdir -p /tmp/boot; +cd /tmp/boot; + +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + COMPTYPE="gzip"; +elif [[ "$linux_relese" == 'centos' ]]; then + COMPTYPE="$(file ../initrd.img |grep -o ':.*compressed data' |cut -d' ' -f2 |sed -r 's/(.*)/\L\1/' |head -n1)" + [[ -z "$COMPTYPE" ]] && echo "Detect compressed type fail." 
&& exit 1; +fi +CompDected='0' +for COMP in `echo -en 'gzip\nlzma\nxz'` + do + if [[ "$COMPTYPE" == "$COMP" ]]; then + CompDected='1' + if [[ "$COMPTYPE" == 'gzip' ]]; then + NewIMG="initrd.img.gz" + else + NewIMG="initrd.img.$COMPTYPE" + fi + mv -f "/tmp/initrd.img" "/tmp/$NewIMG" + break; + fi + done +[[ "$CompDected" != '1' ]] && echo "Detect compressed type not support." && exit 1; +[[ "$COMPTYPE" == 'lzma' ]] && UNCOMP='xz --format=lzma --decompress'; +[[ "$COMPTYPE" == 'xz' ]] && UNCOMP='xz --decompress'; +[[ "$COMPTYPE" == 'gzip' ]] && UNCOMP='gzip -d'; + +$UNCOMP < /tmp/$NewIMG | cpio --extract --verbose --make-directories --no-absolute-filenames >>/dev/null 2>&1 + +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then +cat >/tmp/boot/preseed.cfg<>/dev/null 2>&1 +else + sed -i '/d-i\ grub-installer\/force-efi-extra-removable/d' /tmp/boot/preseed.cfg +fi + +[[ "$ddMode" == '1' ]] && { +WinNoDHCP(){ + echo -ne "for\0040\0057f\0040\0042tokens\00753\0052\0042\0040\0045\0045i\0040in\0040\0050\0047netsh\0040interface\0040show\0040interface\0040\0136\0174more\0040\00533\0040\0136\0174findstr\0040\0057I\0040\0057R\0040\0042本地\0056\0052\0040以太\0056\0052\0040Local\0056\0052\0040Ethernet\0042\0047\0051\0040do\0040\0050set\0040EthName\0075\0045\0045j\0051\r\nnetsh\0040\0055c\0040interface\0040ip\0040set\0040address\0040name\0075\0042\0045EthName\0045\0042\0040source\0075static\0040address\0075$IPv4\0040mask\0075$MASK\0040gateway\0075$GATE\r\nnetsh\0040\0055c\0040interface\0040ip\0040add\0040dnsservers\0040name\0075\0042\0045EthName\0045\0042\0040address\00758\00568\00568\00568\0040index\00751\0040validate\0075no\r\n\r\n" >>'/tmp/boot/net.tmp'; +} +WinRDP(){ + echo -ne 
"netsh\0040firewall\0040set\0040portopening\0040protocol\0075ALL\0040port\0075$WinRemote\0040name\0075RDP\0040mode\0075ENABLE\0040scope\0075ALL\0040profile\0075ALL\r\nnetsh\0040firewall\0040set\0040portopening\0040protocol\0075ALL\0040port\0075$WinRemote\0040name\0075RDP\0040mode\0075ENABLE\0040scope\0075ALL\0040profile\0075CURRENT\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Network\0134NewNetworkWindowOff\0042\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0042\0040\0057v\0040fDenyTSConnections\0040\0057t\0040reg\0137dword\0040\0057d\00400\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0134Wds\0134rdpwd\0134Tds\0134tcp\0042\0040\0057v\0040PortNumber\0040\0057t\0040reg\0137dword\0040\0057d\0040$WinRemote\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0134WinStations\0134RDP\0055Tcp\0042\0040\0057v\0040PortNumber\0040\0057t\0040reg\0137dword\0040\0057d\0040$WinRemote\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0134WinStations\0134RDP\0055Tcp\0042\0040\0057v\0040UserAuthentication\0040\0057t\0040reg\0137dword\0040\0057d\00400\0040\0057f\r\nFOR\0040\0057F\0040\0042tokens\00752\0040delims\0075\0072\0042\0040\0045\0045i\0040in\0040\0050\0047SC\0040QUERYEX\0040TermService\0040\0136\0174FINDSTR\0040\0057I\0040\0042PID\0042\0047\0051\0040do\0040TASKKILL\0040\0057F\0040\0057PID\0040\0045\0045i\r\nFOR\0040\0057F\0040\0042tokens\00752\0040delims\0075\0072\0042\0040\0045\0045i\0040in\0040\0050\0047SC\0040QUERYEX\0040UmRdpService\0040\0136\0174FINDSTR\0040\0057I\0040\0042PID\0042\0047\0051\0040do\0040TASKKILL\0040\0057F\0040\0057PID\0040\0045\0045i\r\nSC\0040START\0040TermService\r\n\r\n" >>'/tmp/boot/net.tmp'; +} + echo -ne 
"\0100ECHO\0040OFF\r\n\r\ncd\0056\0076\0045WINDIR\0045\0134GetAdmin\r\nif\0040exist\0040\0045WINDIR\0045\0134GetAdmin\0040\0050del\0040\0057f\0040\0057q\0040\0042\0045WINDIR\0045\0134GetAdmin\0042\0051\0040else\0040\0050\r\necho\0040CreateObject\0136\0050\0042Shell\0056Application\0042\0136\0051\0056ShellExecute\0040\0042\0045\0176s0\0042\0054\0040\0042\0045\0052\0042\0054\0040\0042\0042\0054\0040\0042runas\0042\0054\00401\0040\0076\0076\0040\0042\0045temp\0045\0134Admin\0056vbs\0042\r\n\0042\0045temp\0045\0134Admin\0056vbs\0042\r\ndel\0040\0057f\0040\0057q\0040\0042\0045temp\0045\0134Admin\0056vbs\0042\r\nexit\0040\0057b\00402\0051\r\n\r\n" >'/tmp/boot/net.tmp'; + [[ "$setNet" == '1' ]] && WinNoDHCP; + [[ "$setNet" == '0' ]] && [[ "$AutoNet" == '0' ]] && WinNoDHCP; + [[ "$setRDP" == '1' ]] && [[ -n "$WinRemote" ]] && WinRDP + echo -ne "ECHO\0040SELECT\0040VOLUME\0075\0045\0045SystemDrive\0045\0045\0040\0076\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\nECHO\0040EXTEND\0040\0076\0076\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\nSTART\0040/WAIT\0040DISKPART\0040\0057S\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\nDEL\0040\0057f\0040\0057q\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\n\r\n" >>'/tmp/boot/net.tmp'; + echo -ne "cd\0040\0057d\0040\0042\0045ProgramData\0045\0057Microsoft\0057Windows\0057Start\0040Menu\0057Programs\0057Startup\0042\r\ndel\0040\0057f\0040\0057q\0040net\0056bat\r\n\r\n\r\n" >>'/tmp/boot/net.tmp'; + iconv -f 'UTF-8' -t 'GBK' '/tmp/boot/net.tmp' -o '/tmp/boot/net.bat' + rm -rf '/tmp/boot/net.tmp' +} + +[[ "$ddMode" == '0' ]] && { + sed -i '/anna-install/d' /tmp/boot/preseed.cfg + sed -i 's/wget.*\/sbin\/reboot\;\ //g' /tmp/boot/preseed.cfg +} + +elif [[ "$linux_relese" == 'centos' ]]; then +cat >/tmp/boot/ks.cfg< /tmp/initrd.img; +cp -f /tmp/initrd.img /boot/initrd.img || sudo cp -f /tmp/initrd.img /boot/initrd.img +cp -f /tmp/vmlinuz /boot/vmlinuz || sudo cp -f /tmp/vmlinuz 
/boot/vmlinuz + +chown root:root $GRUBDIR/$GRUBFILE +chmod 444 $GRUBDIR/$GRUBFILE + +if [[ "$loaderMode" == "0" ]]; then + sleep 3 && reboot || sudo reboot >/dev/null 2>&1 +else + rm -rf "$HOME/loader" + mkdir -p "$HOME/loader" + cp -rf "/boot/initrd.img" "$HOME/loader/initrd.img" + cp -rf "/boot/vmlinuz" "$HOME/loader/vmlinuz" + [[ -f "/boot/initrd.img" ]] && rm -rf "/boot/initrd.img" + [[ -f "/boot/vmlinuz" ]] && rm -rf "/boot/vmlinuz" + echo && ls -AR1 "$HOME/loader" +fi + + diff --git "a/linux/Linux-screen-\346\226\260\347\252\227\345\217\243.md" "b/linux/Linux-screen-\346\226\260\347\252\227\345\217\243.md" new file mode 100644 index 0000000..ba34894 --- /dev/null +++ "b/linux/Linux-screen-\346\226\260\347\252\227\345\217\243.md" @@ -0,0 +1,155 @@ +# 简介 + + Screen是一款由GNU计划开发的用于命令行终端切换的自由软件。 + + 用户可以通过该软件同时连接多个本地或远程的命令行会话,并在其间自由切换。 + + GNU Screen可以看作是窗口管理器的命令行界面版本。 + + 它提供了统一的管理多个会话的界面和相应的功能。 + + 官方网址:http://www.gnu.org/software/screen/ + + 在Screen环境下,所有的会话都独立的运行,并拥有各自的编号、输入、输出和窗口缓存。 + + 用户可以通过快捷键在不同的窗口下切换,并可以自由的重定向各个窗口的输入和输出。 + +# 安装 + + yum install -y screen + + +# 语法 + +> screen [-AmRvx -ls -wipe][-d <作业名称>][-h <行数>][-r <作业名称>][-s ][-S <作业名称>] + + -A  将所有的视窗都调整为目前终端机的大小。 + + -d <作业名称>  将指定的screen作业离线。 + + -h <行数>  指定视窗的缓冲区行数。 + + -m  即使目前已在作业中的screen作业,仍强制建立新的screen作业。 + + -r <作业名称>  恢复离线的screen作业。 + + -R  先试图恢复离线的作业。若找不到离线的作业,即建立新的screen作业。 + + -s  指定建立新视窗时,所要执行的shell。 + + -S <作业名称>  指定screen作业的名称。 + + -v  显示版本信息。 + + -x  恢复之前离线的screen作业。 + + -ls或--list  显示目前所有的screen作业。 + + -wipe  检查目前所有的screen作业,并删除已经无法使用的screen作业。 + +# 常用screen参数 + + screen -S yourname -> 新建一个叫yourname的session + + screen -ls -> 列出当前所有的session + + screen -r yourname -> 回到yourname这个session + + screen -d yourname -> 远程detach某个session + + screen -d -r yourname -> 结束当前session并回到yourname这个session + +# 在Session下,使用ctrl+a(C-a) + C-a ? 
-> 显示所有键绑定信息 + + C-a c -> 创建一个新的运行shell的窗口并切换到该窗口 + + C-a n -> Next,切换到下一个 window + + C-a p -> Previous,切换到前一个 window + + C-a 0..9 -> 切换到第 0..9 个 window + + Ctrl+a [Space] -> 由视窗0循序切换到视窗9 + + C-a C-a -> 在两个最近使用的 window 间切换 + + C-a x -> 锁住当前的 window,需用用户密码解锁 + + C-a d -> detach,暂时离开当前session,将目前的 screen session (可能含有多个 windows) + + 丢到后台执行,并会回到还没进 screen 时的状态,此时在 screen session 里, + + 每个 window 内运行的 process (无论是前台/后台)都在继续执行,即使 logout 也不影响。 + + C-a z -> 把当前session放到后台执行,用 shell 的 fg 命令则可回去。 + + C-a w -> 显示所有窗口列表 + + C-a t -> time,显示当前时间,和系统的 load + + C-a k -> kill window,强行关闭当前的 window + + C-a [ -> 进入 copy mode,在 copy mode 下可以回滚、搜索、复制就像用使用 vi 一样 + + C-b Backward,PageUp + + C-f Forward,PageDown + + H(大写) High,将光标移至左上角 + + L Low,将光标移至左下角 + + 0 移到行首 + + $ 行末 + + w forward one word,以字为单位往前移 + + b backward one word,以字为单位往后移 + + Space 第一次按为标记区起点,第二次按为终点 + + Esc 结束 copy mode + + C-a ] -> paste,把刚刚在 copy mode 选定的内容贴上 + + +# 常用操作 + + 创建会话(-m 强制): + +>screen -dmS session_name + +#### 关闭会话: + + session_name 可能重复 ,所以还可以用 session_name前面的id号 + + 大致如下 有两个session_name 为 ojbk 其中一个id号是9554 另一个是15494 + + [root@VM-4-2-centos mysql-8.0.21]# screen -ls + There is a screen on: + 9554.ojbk (Detached) + 15494.ojbk (Detached) + 1 Socket in /var/run/screen/S-root. 
+ + + + +>screen -X -S session_name quit + +或者 + +>screen -X -S id quit + +#### 查看所有会话: + +>screen -ls + +#### 进入会话: + +>screen -r session_name + +或者 + +>screen -r id diff --git a/linux/README.md b/linux/README.md new file mode 100644 index 0000000..e720edd --- /dev/null +++ b/linux/README.md @@ -0,0 +1,228 @@ +# 记录 vps 采坑流程 + + 上周正好 黑色星期五,于是9.9美刀入手 一只传家宝 。pending 了三天 终于 开通了、中间 还因为防火墙的问题导致 ssh直接挂了。 + + 开通后到手第一件事 必然就是开机装上自带的预装系统 + +## 接下来 开始一系列的折腾 + + 在 网上搜了一下 纯净版CentOS系统的 一键安装脚本[该脚本有没有问题我不清楚] + 毕竟我不懂shell编程 里面的代码也是基本看不懂,秉着一颗真诚的心应该不会有啥问题。 + +#### 准备工作 + +Ubuntu + +>apt-get install -y xz-utils openssl gawk coreutils file + +CentOS + +>yum install -y xz openssl gawk coreutils file + + +#### [下载脚本] + +下面的是阉割版 仅装centos 墙裂推荐。 + + 下载 + +>wget --no-check-certificate -qO CentOSNET.sh 'https://static.ojbk.plus/shell/CentOSNET.sh' && chmod a+x CentOSNET.sh + + 默认安装 + +>bash CentOSNET.sh -c 6.8 -v 64 -a + + 脚本使用 + + Usage: + bash CentOSNET.sh -c/--centos [dist-version] + -v/--ver [32/i386|64/amd64] + --ip-addr/--ip-gate/--ip-mask + -yum/--mirror + -a/-m + +详情请查看 作者原文[原文链接](https://moeclub.org/2018/03/26/597/) + + +#### 功能强大的脚本 支持 CentOS,Ubuntu,Debian + + 原作者的下载链接 +>wget --no-check-certificate -qO InstallNET.sh 'https://moeclub.org/attachment/LinuxShell/InstallNET.sh' && chmod a+x InstallNET.sh + + 当时备份的文件 + +>wget --no-check-certificate -qO InstallNET.sh 'https://raw.githubusercontent.com/xx13295/MD-Note/master/linux/InstallNET.sh' && chmod a+x InstallNET.sh + + + + +原作者的链接脚本可能会实时更新的以后会出现更多的新功能也可能修复某一些bug + + 当然也可能跑路 无法下载 + + 默认安装 + +>bash InstallNET.sh -c 6.9 -v 64 -a + + 然后 等待 20~30分钟左右 就好了 + [链接不了不要慌张 等就完事了 过了1小时还不行 估计要重装了] + 如果要查看进度 一般的云服务商都有vnc服务 + 使用它就可以实时查看安装进度了 + + + +#### 警告 + + 反正别人造好的轮子 用就完事了 用就代表相信作者 , + 当然也要承担一定的风险( 比如 dd包 存在安全隐患 ) 。 + 不相信作者就别用! 
+ + +## 正式 折腾开始 + + 这个脚本装完系统以后 默认root账号密码如下 + +|参数|详情| +|:-|:-| +|用户|root | +|密码|Vicer | +|端口|22 | + +使用 ssh工具登录 + +#### 一 、第一步肯定是修改密码了 + +>passwd + +然后输入两次新密码就完事了 + +#### 二 、修改ssh 端口 + +>vi /etc/ssh/sshd_config + +按键盘的 i 键 开始 编辑 +找到#Port 22 +删除#号 +在另起一行写入 + +>Port 2333 + +这样就同时拥有了 22端口和2333端口的ssh链接端口 + +保存退出(Esc --> : --> wq ) + +>vi /etc/sysconfig/iptables + +找到 `-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT` 这一行 + +这这一行后面 输入`-A INPUT -m state --state NEW -m tcp -p tcp --dport 2333 -j ACCEPT` + +一定要在这个后面加入 别 直接放到最后一行 ,放最后面可能会导致防火墙出现问题,到时候连不上就哭吧 + +然后 重启防火墙 + +>/etc/rc.d/init.d/iptables restart + +查看添加的端口是否打开 + +>/etc/init.d/iptables status + +重启sshd服务 + +>/etc/init.d/sshd restart + +重启sshd之后使用新的端口测试下 登录 + +如果可以登录就编辑`/etc/ssh/sshd_config` 删除`Port 22` 保存后重启sshd 服务 + +这样以后就可以用新的2333端口登录了 + +#### 三、 创建普通用户 + +创建一个ojbk用户 + +>useradd ojbk + +为ojbk用户设置密码 + +>passwd ojbk + +输入两遍密码就完事了 + +赋予这个用户sudo权限 首先先给'suders'文件修改权限 + +>chmod u+w /etc/sudoers + +. + +>vi /etc/sudoers + +找到`root ALL=(ALL) ALL` 这一行在它下面一行加上 + +>ojbk ALL=(ALL) NOPASSWD: ALL + +保存退出完事 + +这时候 这个ojbk 用户就可以使用sudo 获取root权限了 + +注意 `NOPASSWD: ALL` 这个写法 是获取root权限时不必再次输入密码 + +当然如果你喜欢输密码 去掉前面的`NOPASSWD: ` 就完事了 + +#### 四、关闭root用户直接ssh登录 + +>/etc/ssh/sshd_config + + +找到这一行`#PermitRootLogin yes` + +去掉 前面的`#`号 并且把`yes` 改成`no` +变成`PermitRootLogin no` +保存退出 + +重启 sshd 服务 +>/etc/init.d/sshd restart + +这时候 root 用户 就不能直接登录了。 + +可以先登录普通用户 使用 `su root`切换root用户。 + + +#### 五、 禁止 ping + + ping命令常用于网络检测,确定两台主机之间是否可以通信, + 其使用的是ICMP(Internet控制报文协议),起到一定的安全作用。 + + +系统默认是允许ICMP协议的,设置`/proc/sys/net/ipv4/icmp_echo_ignore_all`参数为1 + +> echo "1" > /proc/sys/net/ipv4/icmp_echo_ignore_all + +检查参数是否为 1 + +>cat  /proc/sys/net/ipv4/icmp_echo_ignore_all + +如果该参数为1,表示禁止忽略ICMP报文的所有请求,系统默认值为0,表示允许其它主机ping该主机 + + + + + + + + + + + + + + + + + + + + + + + diff --git a/linux/dd-Windows.md b/linux/dd-Windows.md new file mode 100644 index 0000000..caa5d61 --- /dev/null +++ b/linux/dd-Windows.md @@ -0,0 +1,12 @@ +# + +作者链接 + +wget 
--no-check-certificate -qO InstallNET.sh 'https://moeclub.org/attachment/LinuxShell/InstallNET.sh' && bash InstallNET.sh -dd '[Windows dd包直连地址]' + + +备份地址 + +wget --no-check-certificate -qO InstallNET-20200714.sh 'https://raw.githubusercontent.com/xx13295/MD-Note/master/linux/InstallNET-20200714.sh' && bash InstallNET-20200714.sh -dd '[Windows dd包直连地址]' + + diff --git "a/linux/linux\345\233\236\346\224\266\347\253\231\345\212\237\350\203\275/README.md" "b/linux/linux\345\233\236\346\224\266\347\253\231\345\212\237\350\203\275/README.md" new file mode 100644 index 0000000..816bc5c --- /dev/null +++ "b/linux/linux\345\233\236\346\224\266\347\253\231\345\212\237\350\203\275/README.md" @@ -0,0 +1,64 @@ +# 伪回收站功能 + + 麻麻再也不用担心我 不小心删错文件了 。 + + 原理: 就是 将要删除的文件 移动到 某一个 文件夹中。 + + +## 第一步 创建一个 文件夹 当做 回收站 + +>mkdir -p ~/.Ojbk + + +## 第二步 配置 当前用户专用.bashrc文件 + +>vi ~/.bashrc + + 按 一下 键盘的 i 键 开始写入 下面的一大串 内容 写完后 Esc : wq 保存 + + + alias rm=mvfile + alias lss='ls ~/.Ojbk' + alias mvv=undel + alias del=delfile + alias delall=delallfile + + mvfile() + { + mv $@ ~/.Ojbk/`date +"%Y%m%d-%H:%M:%S"`-$@ + } + undel() + { + mv -i ~/.Ojbk/$@ ./ + } + delfile() + { + read -p "are you sure?[y/n]" confirm + [ $confirm == 'y' ] || [ $confirm == 'Y' ] && /usr/bin/rm -rf ~/.Ojbk/$@ + } + delallfile() + { + read -p "are you sure?[y/n]" confirm + [ $confirm == 'y' ] || [ $confirm == 'Y' ] && /usr/bin/rm -rf ~/.Ojbk/* + } + + +## 第三步 让配置生效 + +>source ~/.bashrc + + +### 食用 方法 +|参数|作用| +|:-|:-| +|rm|删除文件到回收站 (参数为要删除的文件名) | +|mvv|还原回收站的文件到用户目录下(参数为回收站中文件名)| +|lss|列出回收站中被你伪删除的文件(无参)| +|del|递归删除某个文件(参数为回收站中文件名)| +|delall|删除回收站里全部文件(无参)| + +### 可以配合的定时清理文件食用 + + 这样 删除到回收站里的 文件定期就会自动删除 ~ +![image](https://github.com/xx13295/wxm/blob/master/images/o.png?raw=true) +[传送门](https://github.com/xx13295/MD-Note/tree/master/linux/linux%E5%AE%9A%E6%97%B6%E6%B8%85%E7%90%86%E6%96%87%E4%BB%B6) diff --git "a/linux/linux\345\256\232\346\227\266\346\270\205\347\220\206\346\226\207\344\273\266/README.md" 
"b/linux/linux\345\256\232\346\227\266\346\270\205\347\220\206\346\226\207\344\273\266/README.md" new file mode 100644 index 0000000..12db2e7 --- /dev/null +++ "b/linux/linux\345\256\232\346\227\266\346\270\205\347\220\206\346\226\207\344\273\266/README.md" @@ -0,0 +1,72 @@ +# 定时清理 历史遗留文件 + + 麻麻再也不用担心我 服务器存储空间不足了。 + + +## 第一步 ,清理 前N天的 文件 第二步,加入定时任务 + + +### 查询 /home/ojbk/images/ 中 文件后缀为.jpg 且时间为5天以前的 图片将其删除 +>find /home/ojbk/images/ -mtime +5 -name "*.jpg" -exec rm -rf {} \; + +### 总不能每次都手动执行上面这条命令吧这样太麻烦了 + + 于是我们把它加入到定时任务中 使用 crontab -e + +>2 0 * * * /bin/find /home/ojbk/images/ -mtime +5 -name "*.jpg" -exec rm -rf {} \; + +上面这条命令说的是 每天凌晨0点2分 删除/home/ojbk/images/中 前 5天的 jpg 图片 + + +## 删除命令 + +>find 对应目录 -mtime +天数 -name "文件名" -exec rm -rf {} \; + + +|参数|作用| +|:-|:-| +|find|查找命令 | +|/home/ojbk/images/|想要进行清理的目录,这里只是示例目录| +|-mtime|限定的时间| +|+5| 前5天 写成 -1 就是包括今天| +|"*.jpg"|图片文件,所有文件的话就是将jpg换成* 就好了| +|-exec rm -rf {} \;|删除命令 | + +## 定时任务 + + * * * * * command + + +#### 这前面 的 五个 * 代表什么意思呢? + +>答:分别代表 分钟、小时、 日期、月份、星期几 这里参数全是 * 说明这个定时任务 每分钟执行一次 + +>>分钟,可以是从0到59之间的任何整数 +>>小时,可以是从0到23之间的任何整数。 +>>日期,可以是从1到31之间的任何整数。 +>>月份,可以是从1到12之间的任何整数。 +>>星期几,可以是从0到7之间的任何整数,这里的0或7代表星期日。 +>>command:要执行的命令,可以是系统命令,也可以是自己编写的脚本文件。 + + 当然 你可以把 一系列的命令写到shell脚本中 通过这个 + +>crontab -e + + 加入到定时任务中 + +>>星号(*):代表所有可能的值,例如month字段如果是星号,则表示在满足其它字段的制约条件后每月都执行该命令操作。 + +>>逗号(,):可以用逗号隔开的值指定一个列表范围,例如,“1,3,5,7” + +>>横杠(-):可以用整数之间的中杠表示一个整数范围,例如“1-5”表示“1,2,3,4,5” + +>>斜线(/):可以用正斜线指定时间的间隔频率,例如“0-23/2”表示每两小时执行一次,同时正斜线可以和星号一起使用,例如*/10,如果用在minute字段,表示每十分钟执行一次。 + +|示例|作用| +|:-|:-| +|6,12 * * * *|每小时的第6分钟和第12分钟执行 | +|6,12 5-6 * * * |在上午5点到6点的第6和第12分钟执行| +|6,12 5-6 */2 * * |每隔两天执行一次,具体执行时间为上午5点到6点的第6和第12分钟| +|5 3 * * 6,0| 每周六周日3点5分执行| + + diff --git "a/linux/linux\346\215\242\345\206\205\346\240\270/README.md" "b/linux/linux\346\215\242\345\206\205\346\240\270/README.md" new file mode 100644 index 0000000..614f75e --- /dev/null +++ "b/linux/linux\346\215\242\345\206\205\346\240\270/README.md" @@ -0,0 +1,85 @@ +# Ubuntu
系统 + +### 查看系统版本 + +>lsb_release -a + +### 查看当前安装的内核 + +>dpkg -l|grep linux-image + +### 安装新内核 + +>sudo apt-get install linux-image-3.16.0-43-generic linux-image-extra-3.16.0-43-generic + +### 卸载不要的内核 + +>sudo apt-get purge linux-image-4.4.0-38-generic linux-image-extra-4.4.0-38-generic + +### 更新 grub引导 + +>sudo update-grub + +### 搜索并更新可用的内核 + +>sudo apt-cache search linux-image + +# CentOs 系统 + +### 查看CentOs版本号: + +>cat /etc/redhat-release + +### 查内核版本 + +>uname -r + +### 更多内核版本获取 请网上自找 + + 这里提供的 内核都是 某速 可以用的内核 + +#### CentOs 6 + +>rpm -ivh https://static.ojbk.plus/centos-rpm/kernel-firmware-2.6.32-504.3.3.el6.noarch.rpm +>rpm -ivh https://static.ojbk.plus/centos-rpm/kernel-2.6.32-504.3.3.el6.x86_64.rpm --force + +#### CentOs 7 + +>rpm -ivh https://static.ojbk.plus/centos-rpm/kernel-3.10.0-229.1.2.el7.x86_64.rpm --force + +### 确认 我们更新的内核版本是 default 0 + +>cat /boot/grub/grub.conf + +通常新装都会排到前面 + +我们会看到`title CentOS (2.6.32-504.3.3.el6.x86_64) ` 然后一大溜 [这个是我们刚装的] + +下面还有一个例如`title CentOS (2.6.32-754.6.3.el6.x86_64)`然后一大溜[这个是原来就带的] + +非注释部分的 开始 有个 `default=0` + +这个`default=0` 代表 选择的内核是` 2.6.32-504` + +如果改成 ` default=1` 就代表 选择的内核是 `2.6.32-754` + + +### 重启 + +>reboot + +### 启动后 看是否成功换成我们想要的内核 + +>uname -r + +### 查看安装了哪些 内核包 + +>rpm -qa |grep kernel + +### 使用yum remove 或rpm -e 删除无用内核 + +>yum remove kernel-2.6.32-754.6.3.el6.x86_64 + +### 搜索并更新可用的内核 +>yum search kernel + diff --git a/linux/ojbk.sh b/linux/ojbk.sh new file mode 100644 index 0000000..a0f83d2 --- /dev/null +++ b/linux/ojbk.sh @@ -0,0 +1,855 @@ +#!/bin/bash + +## License: GPL +## It can reinstall Debian, Ubuntu, CentOS system with network.
+ +export tmpVER='' +export tmpDIST='' +export tmpURL='' +export tmpWORD='' +export tmpMirror='' +export tmpSSL='' +export tmpINS='' +export ipAddr='' +export ipMask='' +export ipGate='' +export Relese='' +export ddMode='0' +export setNet='0' +export setRDP='0' +export setIPv6='0' +export isMirror='0' +export FindDists='0' +export loaderMode='0' +export IncFirmware='0' +export SpikCheckDIST='0' +export setInterfaceName='0' +export UNKNOWHW='0' +export UNVER='6.4' + +while [[ $# -ge 1 ]]; do + case $1 in + -v|--ver) + shift + tmpVER="$1" + shift + ;; + -d|--debian) + shift + Relese='Debian' + tmpDIST="$1" + shift + ;; + -u|--ubuntu) + shift + Relese='Ubuntu' + tmpDIST="$1" + shift + ;; + -c|--centos) + shift + Relese='CentOS' + tmpDIST="$1" + shift + ;; + -dd|--image) + shift + ddMode='1' + tmpURL="$1" + shift + ;; + -p|--password) + shift + tmpWORD="$1" + shift + ;; + -i|--interface) + shift + interface="$1" + shift + ;; + --ip-addr) + shift + ipAddr="$1" + shift + ;; + --ip-mask) + shift + ipMask="$1" + shift + ;; + --ip-gate) + shift + ipGate="$1" + shift + ;; + --dev-net) + shift + setInterfaceName='1' + ;; + --loader) + shift + loaderMode='1' + ;; + --prefer) + shift + tmpPrefer="$1" + shift + ;; + -a|--auto) + shift + tmpINS='auto' + ;; + -m|--manual) + shift + tmpINS='manual' + ;; + -apt|-yum|--mirror) + shift + isMirror='1' + tmpMirror="$1" + shift + ;; + -rdp) + shift + setRDP='1' + WinRemote="$1" + shift + ;; + -ssl) + shift + tmpSSL="$1" + shift + ;; + -firmware) + shift + IncFirmware="1" + ;; + --ipv6) + shift + setIPv6='1' + ;; + *) + if [[ "$1" != 'error' ]]; then echo -ne "\nInvaild option: '$1'\n\n"; fi + echo -ne " Usage:\n\tbash $(basename $0)\t-d/--debian [\033[33m\033[04mdists-name\033[0m]\n\t\t\t\t-u/--ubuntu [\033[04mdists-name\033[0m]\n\t\t\t\t-c/--centos [\033[33m\033[04mdists-verison\033[0m]\n\t\t\t\t-v/--ver 
[32/\033[33m\033[04mi386\033[0m|64/amd64]\n\t\t\t\t--ip-addr/--ip-gate/--ip-mask\n\t\t\t\t-apt/-yum/--mirror\n\t\t\t\t-dd/--image\n\t\t\t\t-a/--auto\n\t\t\t\t-m/--manual\n" + exit 1; + ;; + esac + done + +[[ "$EUID" -ne '0' ]] && echo "Error:This script must be run as root!" && exit 1; + +function CheckDependence(){ +FullDependence='0'; +for BIN_DEP in `echo "$1" |sed 's/,/\n/g'` + do + if [[ -n "$BIN_DEP" ]]; then + Founded='0'; + for BIN_PATH in `echo "$PATH" |sed 's/:/\n/g'` + do + ls $BIN_PATH/$BIN_DEP >/dev/null 2>&1; + if [ $? == '0' ]; then + Founded='1'; + break; + fi + done + if [ "$Founded" == '1' ]; then + echo -en "[\033[32mok\033[0m]\t"; + else + FullDependence='1'; + echo -en "[\033[31mNot Install\033[0m]"; + fi + echo -en "\t$BIN_DEP\n"; + fi + done +if [ "$FullDependence" == '1' ]; then + echo -ne "\n\033[31mError! \033[0mPlease use '\033[33mapt-get\033[0m' or '\033[33myum\033[0m' install it.\n\n\n" + exit 1; +fi +} + +function SelectMirror(){ + [ $# -ge 3 ] || exit 1 + Relese="$1" + DIST=$(echo "$2" |sed 's/\ //g' |sed -r 's/(.*)/\L\1/') + VER=$(echo "$3" |sed 's/\ //g' |sed -r 's/(.*)/\L\1/') + New=$(echo "$4" |sed 's/\ //g') + [ -n "$Relese" ] || exit 1 + [ -n "$DIST" ] || exit 1 + [ -n "$VER" ] || exit 1 + relese=$(echo $Relese |sed -r 's/(.*)/\L\1/') + if [ "$Relese" == "Debian" ] || [ "$Relese" == "Ubuntu" ]; then + inUpdate=''; [ "$Relese" == "Ubuntu" ] && inUpdate='-updates' + MirrorTEMP="SUB_MIRROR/dists/${DIST}${inUpdate}/main/installer-${VER}/current/images/netboot/${relese}-installer/${VER}/initrd.gz" + elif [ "$Relese" == "CentOS" ]; then + MirrorTEMP="SUB_MIRROR/${DIST}/os/${VER}/isolinux/initrd.img" + fi + [ -n "$MirrorTEMP" ] || exit 1 + MirrorStatus=0 + declare -A MirrorBackup + MirrorBackup=(["Debian0"]="" ["Debian1"]="http://deb.debian.org/debian" ["Debian2"]="http://archive.debian.org/debian" ["Ubuntu0"]="" ["Ubuntu1"]="http://archive.ubuntu.com/ubuntu" ["CentOS0"]="" ["CentOS1"]="http://mirror.centos.org/centos" 
["CentOS2"]="http://vault.centos.org") + echo "$New" |grep -q '^http://\|^https://\|^ftp://' && MirrorBackup[${Relese}0]="$New" + for mirror in $(echo "${!MirrorBackup[@]}" |sed 's/\ /\n/g' |sort -n |grep "^$Relese") + do + CurMirror="${MirrorBackup[$mirror]}" + [ -n "$CurMirror" ] || continue + MirrorURL=`echo "$MirrorTEMP" |sed "s#SUB_MIRROR#${CurMirror}#g"` + wget --no-check-certificate --spider --timeout=3 -o /dev/null "$MirrorURL" + [ $? -eq 0 ] && MirrorStatus=1 && break + done + [ $MirrorStatus -eq 1 ] && echo "$CurMirror" || exit 1 +} + +[ -n "$Relese" ] || Relese='Debian' +linux_relese=$(echo "$Relese" |sed 's/\ //g' |sed -r 's/(.*)/\L\1/') +clear && echo -e "\n\033[36m# Check Dependence\033[0m\n" + +if [[ "$ddMode" == '1' ]]; then + CheckDependence iconv; + linux_relese='debian'; + tmpDIST='jessie'; + tmpVER='amd64'; + tmpINS='auto'; +fi + +if [[ "$Relese" == 'Debian' ]] || [[ "$Relese" == 'Ubuntu' ]]; then + CheckDependence wget,awk,grep,sed,cut,cat,cpio,gzip,find,dirname,basename; +elif [[ "$Relese" == 'CentOS' ]]; then + CheckDependence wget,awk,grep,sed,cut,cat,cpio,gzip,find,dirname,basename,file,xz; +fi +[ -n "$tmpWORD" ] && CheckDependence openssl + +if [[ "$loaderMode" == "0" ]]; then + [[ -f '/boot/grub/grub.cfg' ]] && GRUBVER='0' && GRUBDIR='/boot/grub' && GRUBFILE='grub.cfg'; + [[ -z "$GRUBDIR" ]] && [[ -f '/boot/grub2/grub.cfg' ]] && GRUBVER='0' && GRUBDIR='/boot/grub2' && GRUBFILE='grub.cfg'; + [[ -z "$GRUBDIR" ]] && [[ -f '/boot/grub/grub.conf' ]] && GRUBVER='1' && GRUBDIR='/boot/grub' && GRUBFILE='grub.conf'; + [ -z "$GRUBDIR" -o -z "$GRUBFILE" ] && echo -ne "Error! 
\nNot Found grub.\n" && exit 1; +else + tmpINS='auto' +fi + +if [[ -n "$tmpVER" ]]; then + tmpVER="$(echo "$tmpVER" |sed -r 's/(.*)/\L\1/')"; + if [[ "$tmpVER" == '32' ]] || [[ "$tmpVER" == 'i386' ]] || [[ "$tmpVER" == 'x86' ]]; then + VER='i386'; + fi + if [[ "$tmpVER" == '64' ]] || [[ "$tmpVER" == 'amd64' ]] || [[ "$tmpVER" == 'x86_64' ]] || [[ "$tmpVER" == 'x64' ]]; then + if [[ "$Relese" == 'Debian' ]] || [[ "$Relese" == 'Ubuntu' ]]; then + VER='amd64'; + elif [[ "$Relese" == 'CentOS' ]]; then + VER='x86_64'; + fi + fi +fi +[ -z "$VER" ] && VER='amd64' + +if [[ -z "$tmpDIST" ]]; then + [ "$Relese" == 'Debian' ] && tmpDIST='jessie' && DIST='jessie'; + [ "$Relese" == 'Ubuntu' ] && tmpDIST='bionic' && DIST='bionic'; + [ "$Relese" == 'CentOS' ] && tmpDIST='6.10' && DIST='6.10'; +fi + +if [[ -z "$DIST" ]]; then + if [[ "$Relese" == 'Debian' ]]; then + SpikCheckDIST='0' + DIST="$(echo "$tmpDIST" |sed -r 's/(.*)/\L\1/')"; + echo "$DIST" |grep -q '[0-9]'; + [[ $? -eq '0' ]] && { + isDigital="$(echo "$DIST" |grep -o '[\.0-9]\{1,\}' |sed -n '1h;1!H;$g;s/\n//g;$p' |cut -d'.' -f1)"; + [[ -n $isDigital ]] && { + [[ "$isDigital" == '7' ]] && DIST='wheezy'; + [[ "$isDigital" == '8' ]] && DIST='jessie'; + [[ "$isDigital" == '9' ]] && DIST='stretch'; + [[ "$isDigital" == '10' ]] && DIST='buster'; + } + } + LinuxMirror=$(SelectMirror "$Relese" "$DIST" "$VER" "$tmpMirror") + fi + if [[ "$Relese" == 'Ubuntu' ]]; then + SpikCheckDIST='0' + DIST="$(echo "$tmpDIST" |sed -r 's/(.*)/\L\1/')"; + echo "$DIST" |grep -q '[0-9]'; + [[ $? 
-eq '0' ]] && { + isDigital="$(echo "$DIST" |grep -o '[\.0-9]\{1,\}' |sed -n '1h;1!H;$g;s/\n//g;$p')"; + [[ -n $isDigital ]] && { + [[ "$isDigital" == '12.04' ]] && DIST='precise'; + [[ "$isDigital" == '14.04' ]] && DIST='trusty'; + [[ "$isDigital" == '16.04' ]] && DIST='xenial'; + [[ "$isDigital" == '18.04' ]] && DIST='bionic'; + } + } + LinuxMirror=$(SelectMirror "$Relese" "$DIST" "$VER" "$tmpMirror") + fi + if [[ "$Relese" == 'CentOS' ]]; then + SpikCheckDIST='1' + DISTCheck="$(echo "$tmpDIST" |grep -o '[\.0-9]\{1,\}')"; + LinuxMirror=$(SelectMirror "$Relese" "$DISTCheck" "$VER" "$tmpMirror") + ListDIST="$(wget --no-check-certificate -qO- "$LinuxMirror/dir_sizes" |cut -f2 |grep '^[0-9]')" + DIST="$(echo "$ListDIST" |grep "^$DISTCheck" |head -n1)" + [[ -z "$DIST" ]] && { + echo -ne '\nThe dists version not found in this mirror, Please check it! \n\n' + bash $0 error; + exit 1; + } + wget --no-check-certificate -qO- "$LinuxMirror/$DIST/os/$VER/.treeinfo" |grep -q 'general'; + [[ $? != '0' ]] && { + echo -ne "\nThe version not found in this mirror, Please change mirror try again! \n\n"; + exit 1; + } + fi +fi + +if [[ -z "$LinuxMirror" ]]; then + echo -ne "\033[31mError! \033[0mInvaild mirror! 
\n" + [ "$Relese" == 'Debian' ] && echo -en "\033[33mexample:\033[0m http://deb.debian.org/debian\n\n"; + [ "$Relese" == 'Ubuntu' ] && echo -en "\033[33mexample:\033[0m http://archive.ubuntu.com/ubuntu\n\n"; + [ "$Relese" == 'CentOS' ] && echo -en "\033[33mexample:\033[0m http://mirror.centos.org/centos\n\n"; + bash $0 error; + exit 1; +fi + +if [[ "$SpikCheckDIST" == '0' ]]; then + DistsList="$(wget --no-check-certificate -qO- "$LinuxMirror/dists/" |grep -o 'href=.*/"' |cut -d'"' -f2 |sed '/-\|old\|Debian\|experimental\|stable\|test\|sid\|devel/d' |grep '^[^/]' |sed -n '1h;1!H;$g;s/\n//g;s/\//\;/g;$p')"; + for CheckDEB in `echo "$DistsList" |sed 's/;/\n/g'` + do + [[ "$CheckDEB" == "$DIST" ]] && FindDists='1' && break; + done + [[ "$FindDists" == '0' ]] && { + echo -ne '\nThe dists version not found, Please check it! \n\n' + bash $0 error; + exit 1; + } +fi + +[[ "$ddMode" == '1' ]] && { + export SSL_SUPPORT='https://github.com/xx13295/MD-Note/raw/master/linux/wget_udeb_amd64.tar.gz'; + if [[ -n "$tmpURL" ]]; then + DDURL="$tmpURL" + echo "$DDURL" |grep -q '^http://\|^ftp://\|^https://'; + [[ $? -ne '0' ]] && echo 'Please input vaild URL,Only support http://, ftp:// and https:// !' && exit 1; + [[ -n "$tmpSSL" ]] && SSL_SUPPORT="$tmpSSL"; + else + echo 'Please input vaild image URL! 
'; + exit 1; + fi +} + +[[ -n "$tmpINS" ]] && { + [[ "$tmpINS" == 'auto' ]] && inVNC='n'; + [[ "$tmpINS" == 'manual' ]] && inVNC='y'; +} + +[ -n "$ipAddr" ] && [ -n "$ipMask" ] && [ -n "$ipGate" ] && setNet='1'; +[[ -n "$tmpWORD" ]] && myPASSWORD="$(openssl passwd -1 "$tmpWORD")"; +[[ -z "$myPASSWORD" ]] && myPASSWORD='$1$85vNtBDU$0wr0WltGEyw9WAsdASQaJ/'; + +if [[ -n "$interface" ]]; then + IFETH="$interface" +else + if [[ "$linux_relese" == 'centos' ]]; then + IFETH="link" + else + IFETH="auto" + fi +fi + +clear && echo -e "\n\033[36m# Install\033[0m\n" + +ASKVNC(){ + inVNC='y'; + [[ "$ddMode" == '0' ]] && { + echo -ne "\033[34mDo you want to install os manually?\033[0m\e[33m[\e[32my\e[33m/n]\e[0m " + read tmpinVNC + [[ -n "$inVNCtmp" ]] && inVNC="$tmpinVNC" + } + [ "$inVNC" == 'y' -o "$inVNC" == 'Y' ] && inVNC='y'; + [ "$inVNC" == 'n' -o "$inVNC" == 'N' ] && inVNC='n'; +} + +[ "$inVNC" == 'y' -o "$inVNC" == 'n' ] || ASKVNC; +[[ "$ddMode" == '0' ]] && { + [[ "$inVNC" == 'y' ]] && echo -e "\033[34mManual Mode\033[0m insatll [\033[33m$Relese\033[0m] [\033[33m$DIST\033[0m] [\033[33m$VER\033[0m] in VNC. " + [[ "$inVNC" == 'n' ]] && echo -e "\033[34mAuto Mode\033[0m insatll [\033[33m$Relese\033[0m] [\033[33m$DIST\033[0m] [\033[33m$VER\033[0m]. " +} +[[ "$ddMode" == '1' ]] && { + echo -ne "\033[34mAuto Mode\033[0m insatll \033[33mWindows\033[0m\n[\033[33m$DDURL\033[0m]\n" +} + +if [[ "$linux_relese" == 'centos' ]]; then + if [[ "$DIST" != "$UNVER" ]]; then + awk 'BEGIN{print '${UNVER}'-'${DIST}'}' |grep -q '^-' + if [ $? != '0' ]; then + UNKNOWHW='1'; + echo -en "\033[33mThe version lower then \033[31m$UNVER\033[33m may not support in auto mode! \033[0m\n"; + if [[ "$inVNC" == 'n' ]]; then + echo -en "\033[35mYou can connect VNC with \033[32mPublic IP\033[35m and port \033[32m1\033[35m/\033[32m5901\033[35m in vnc viewer.\033[0m\n" + read -n 1 -p "Press Enter to continue..." 
INP + [[ "$INP" != '' ]] && echo -ne '\b \n\n'; + fi + fi + awk 'BEGIN{print '${UNVER}'-'${DIST}'+0.59}' |grep -q '^-' + if [ $? == '0' ]; then + echo -en "\n\033[31mThe version higher then \033[33m6.10 \033[31mis not support in current! \033[0m\n\n" + exit 1; + fi + fi +fi + +echo -e "\n[\033[33m$Relese\033[0m] [\033[33m$DIST\033[0m] [\033[33m$VER\033[0m] Downloading..." + +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + inUpdate=''; [ "$linux_relese" == 'ubuntu' ] && inUpdate='-updates' + wget --no-check-certificate -qO '/boot/initrd.img' "${LinuxMirror}/dists/${DIST}${inUpdate}/main/installer-${VER}/current/images/netboot/${linux_relese}-installer/${VER}/initrd.gz" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'initrd.img' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + wget --no-check-certificate -qO '/boot/vmlinuz' "${LinuxMirror}/dists/${DIST}${inUpdate}/main/installer-${VER}/current/images/netboot/${linux_relese}-installer/${VER}/linux" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'vmlinuz' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + MirrorHost="$(echo "$LinuxMirror" |awk -F'://|/' '{print $2}')"; + MirrorFolder="$(echo "$LinuxMirror" |awk -F''${MirrorHost}'' '{print $2}')"; +elif [[ "$linux_relese" == 'centos' ]]; then + wget --no-check-certificate -qO '/boot/initrd.img' "${LinuxMirror}/${DIST}/os/${VER}/isolinux/initrd.img" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'initrd.img' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + wget --no-check-certificate -qO '/boot/vmlinuz' "${LinuxMirror}/${DIST}/os/${VER}/isolinux/vmlinuz" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'vmlinuz' for \033[33m$linux_relese\033[0m failed! 
\n" && exit 1 +else + bash $0 error; + exit 1; +fi +if [[ "$linux_relese" == 'debian' ]]; then + if [[ "$IncFirmware" == '1' ]]; then + wget --no-check-certificate -qO '/boot/firmware.cpio.gz' "http://cdimage.debian.org/cdimage/unofficial/non-free/firmware/${DIST}/current/firmware.cpio.gz" + [[ $? -ne '0' ]] && echo -ne "\033[31mError! \033[0mDownload 'firmware' for \033[33m$linux_relese\033[0m failed! \n" && exit 1 + fi + if [[ "$ddMode" == '1' ]]; then + vKernel_udeb=$(wget --no-check-certificate -qO- "http://$DISTMirror/dists/$DIST/main/installer-$VER/current/images/udeb.list" |grep '^acpi-modules' |head -n1 |grep -o '[0-9]\{1,2\}.[0-9]\{1,2\}.[0-9]\{1,2\}-[0-9]\{1,2\}' |head -n1) + [[ -z "vKernel_udeb" ]] && vKernel_udeb="3.16.0-6" + fi +fi + +[[ "$setNet" == '1' ]] && { + IPv4="$ipAddr"; + MASK="$ipMask"; + GATE="$ipGate"; +} || { + DEFAULTNET="$(ip route show |grep -o 'default via [0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.*' |head -n1 |sed 's/proto.*\|onlink.*//g' |awk '{print $NF}')"; + [[ -n "$DEFAULTNET" ]] && IPSUB="$(ip addr |grep ''${DEFAULTNET}'' |grep 'global' |grep 'brd' |head -n1 |grep -o '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}/[0-9]\{1,2\}')"; + IPv4="$(echo -n "$IPSUB" |cut -d'/' -f1)"; + NETSUB="$(echo -n "$IPSUB" |grep -o '/[0-9]\{1,2\}')"; + GATE="$(ip route show |grep -o 'default via [0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}' |head -n1 |grep -o '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}')"; + [[ -n "$NETSUB" ]] && MASK="$(echo -n 
'128.0.0.0/1,192.0.0.0/2,224.0.0.0/3,240.0.0.0/4,248.0.0.0/5,252.0.0.0/6,254.0.0.0/7,255.0.0.0/8,255.128.0.0/9,255.192.0.0/10,255.224.0.0/11,255.240.0.0/12,255.248.0.0/13,255.252.0.0/14,255.254.0.0/15,255.255.0.0/16,255.255.128.0/17,255.255.192.0/18,255.255.224.0/19,255.255.240.0/20,255.255.248.0/21,255.255.252.0/22,255.255.254.0/23,255.255.255.0/24,255.255.255.128/25,255.255.255.192/26,255.255.255.224/27,255.255.255.240/28,255.255.255.248/29,255.255.255.252/30,255.255.255.254/31,255.255.255.255/32' |grep -o '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}'${NETSUB}'' |cut -d'/' -f1)"; +} + +[[ -n "$GATE" ]] && [[ -n "$MASK" ]] && [[ -n "$IPv4" ]] || { +echo "Not found \`ip command\`, It will use \`route command\`." +ipNum() { + local IFS='.'; + read ip1 ip2 ip3 ip4 <<<"$1"; + echo $((ip1*(1<<24)+ip2*(1<<16)+ip3*(1<<8)+ip4)); +} + +SelectMax(){ +ii=0; +for IPITEM in `route -n |awk -v OUT=$1 '{print $OUT}' |grep '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}'` + do + NumTMP="$(ipNum $IPITEM)"; + eval "arrayNum[$ii]='$NumTMP,$IPITEM'"; + ii=$[$ii+1]; + done +echo ${arrayNum[@]} |sed 's/\s/\n/g' |sort -n -k 1 -t ',' |tail -n1 |cut -d',' -f2; +} + +[[ -z $IPv4 ]] && IPv4="$(ifconfig |grep 'Bcast' |head -n1 |grep -o '[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}.[0-9]\{1,3\}' |head -n1)"; +[[ -z $GATE ]] && GATE="$(SelectMax 2)"; +[[ -z $MASK ]] && MASK="$(SelectMax 3)"; + +[[ -n "$GATE" ]] && [[ -n "$MASK" ]] && [[ -n "$IPv4" ]] || { + echo "Error! Not configure network. 
"; + exit 1; +} +} + +[[ "$setNet" != '1' ]] && [[ -f '/etc/network/interfaces' ]] && { + [[ -z "$(sed -n '/iface.*inet static/p' /etc/network/interfaces)" ]] && AutoNet='1' || AutoNet='0'; + [[ -d /etc/network/interfaces.d ]] && { + ICFGN="$(find /etc/network/interfaces.d -name '*.cfg' |wc -l)" || ICFGN='0'; + [[ "$ICFGN" -ne '0' ]] && { + for NetCFG in `ls -1 /etc/network/interfaces.d/*.cfg` + do + [[ -z "$(cat $NetCFG | sed -n '/iface.*inet static/p')" ]] && AutoNet='1' || AutoNet='0'; + [[ "$AutoNet" -eq '0' ]] && break; + done + } + } +} + +[[ "$setNet" != '1' ]] && [[ -d '/etc/sysconfig/network-scripts' ]] && { + ICFGN="$(find /etc/sysconfig/network-scripts -name 'ifcfg-*' |grep -v 'lo'|wc -l)" || ICFGN='0'; + [[ "$ICFGN" -ne '0' ]] && { + for NetCFG in `ls -1 /etc/sysconfig/network-scripts/ifcfg-* |grep -v 'lo$' |grep -v ':[0-9]\{1,\}'` + do + [[ -n "$(cat $NetCFG | sed -n '/BOOTPROTO.*[dD][hH][cC][pP]/p')" ]] && AutoNet='1' || { + AutoNet='0' && . $NetCFG; + [[ -n $NETMASK ]] && MASK="$NETMASK"; + [[ -n $GATEWAY ]] && GATE="$GATEWAY"; + } + [[ "$AutoNet" -eq '0' ]] && break; + done + } +} + +if [[ "$loaderMode" == "0" ]]; then + [[ ! -f $GRUBDIR/$GRUBFILE ]] && echo "Error! Not Found $GRUBFILE. " && exit 1; + + [[ ! 
-f $GRUBDIR/$GRUBFILE.old ]] && [[ -f $GRUBDIR/$GRUBFILE.bak ]] && mv -f $GRUBDIR/$GRUBFILE.bak $GRUBDIR/$GRUBFILE.old; + mv -f $GRUBDIR/$GRUBFILE $GRUBDIR/$GRUBFILE.bak; + [[ -f $GRUBDIR/$GRUBFILE.old ]] && cat $GRUBDIR/$GRUBFILE.old >$GRUBDIR/$GRUBFILE || cat $GRUBDIR/$GRUBFILE.bak >$GRUBDIR/$GRUBFILE; +else + GRUBVER='2' +fi + +[[ "$GRUBVER" == '0' ]] && { + READGRUB='/tmp/grub.read' + cat $GRUBDIR/$GRUBFILE |sed -n '1h;1!H;$g;s/\n/%%%%%%%/g;$p' |grep -om 1 'menuentry\ [^{]*{[^}]*}%%%%%%%' |sed 's/%%%%%%%/\n/g' >$READGRUB + LoadNum="$(cat $READGRUB |grep -c 'menuentry ')" + if [[ "$LoadNum" -eq '1' ]]; then + cat $READGRUB |sed '/^$/d' >/tmp/grub.new; + elif [[ "$LoadNum" -gt '1' ]]; then + CFG0="$(awk '/menuentry /{print NR}' $READGRUB|head -n 1)"; + CFG2="$(awk '/menuentry /{print NR}' $READGRUB|head -n 2 |tail -n 1)"; + CFG1=""; + for tmpCFG in `awk '/}/{print NR}' $READGRUB` + do + [ "$tmpCFG" -gt "$CFG0" -a "$tmpCFG" -lt "$CFG2" ] && CFG1="$tmpCFG"; + done + [[ -z "$CFG1" ]] && { + echo "Error! read $GRUBFILE. "; + exit 1; + } + + sed -n "$CFG0,$CFG1"p $READGRUB >/tmp/grub.new; + [[ -f /tmp/grub.new ]] && [[ "$(grep -c '{' /tmp/grub.new)" -eq "$(grep -c '}' /tmp/grub.new)" ]] || { + echo -ne "\033[31mError! \033[0mNot configure $GRUBFILE. \n"; + exit 1; + } + fi + [ ! -f /tmp/grub.new ] && echo "Error! $GRUBFILE. 
" && exit 1; + sed -i "/menuentry.*/c\menuentry\ \'Install OS \[$DIST\ $VER\]\'\ --class debian\ --class\ gnu-linux\ --class\ gnu\ --class\ os\ \{" /tmp/grub.new + sed -i "/echo.*Loading/d" /tmp/grub.new; + INSERTGRUB="$(awk '/menuentry /{print NR}' $GRUBDIR/$GRUBFILE|head -n 1)" +} + +[[ "$GRUBVER" == '1' ]] && { + CFG0="$(awk '/title[\ ]|title[\t]/{print NR}' $GRUBDIR/$GRUBFILE|head -n 1)"; + CFG1="$(awk '/title[\ ]|title[\t]/{print NR}' $GRUBDIR/$GRUBFILE|head -n 2 |tail -n 1)"; + [[ -n $CFG0 ]] && [ -z $CFG1 -o $CFG1 == $CFG0 ] && sed -n "$CFG0,$"p $GRUBDIR/$GRUBFILE >/tmp/grub.new; + [[ -n $CFG0 ]] && [ -z $CFG1 -o $CFG1 != $CFG0 ] && sed -n "$CFG0,$[$CFG1-1]"p $GRUBDIR/$GRUBFILE >/tmp/grub.new; + [[ ! -f /tmp/grub.new ]] && echo "Error! configure append $GRUBFILE. " && exit 1; + sed -i "/title.*/c\title\ \'Install OS \[$DIST\ $VER\]\'" /tmp/grub.new; + sed -i '/^#/d' /tmp/grub.new; + INSERTGRUB="$(awk '/title[\ ]|title[\t]/{print NR}' $GRUBDIR/$GRUBFILE|head -n 1)" +} + +if [[ "$loaderMode" == "0" ]]; then +[[ -n "$(grep 'linux.*/\|kernel.*/' /tmp/grub.new |awk '{print $2}' |tail -n 1 |grep '^/boot/')" ]] && Type='InBoot' || Type='NoBoot'; + +LinuxKernel="$(grep 'linux.*/\|kernel.*/' /tmp/grub.new |awk '{print $1}' |head -n 1)"; +[[ -z "$LinuxKernel" ]] && echo "Error! read grub config! 
" && exit 1; +LinuxIMG="$(grep 'initrd.*/' /tmp/grub.new |awk '{print $1}' |tail -n 1)"; +[ -z "$LinuxIMG" ] && sed -i "/$LinuxKernel.*\//a\\\tinitrd\ \/" /tmp/grub.new && LinuxIMG='initrd'; + +if [[ "$setInterfaceName" == "1" ]]; then + Add_OPTION="net.ifnames=0 biosdevname=0"; +else + Add_OPTION=""; +fi + +if [[ "$setIPv6" == "1" ]]; then + Add_OPTION="$Add_OPTION ipv6.disable=1"; +fi + +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + BOOT_OPTION="auto=true $Add_OPTION hostname=$linux_relese domain= -- quiet" +elif [[ "$linux_relese" == 'centos' ]]; then + BOOT_OPTION="ks=file://ks.cfg $Add_OPTION ksdevice=$IFETH" +fi + +[[ "$Type" == 'InBoot' ]] && { + sed -i "/$LinuxKernel.*\//c\\\t$LinuxKernel\\t\/boot\/vmlinuz $BOOT_OPTION" /tmp/grub.new; + sed -i "/$LinuxIMG.*\//c\\\t$LinuxIMG\\t\/boot\/initrd.img" /tmp/grub.new; +} + +[[ "$Type" == 'NoBoot' ]] && { + sed -i "/$LinuxKernel.*\//c\\\t$LinuxKernel\\t\/vmlinuz $BOOT_OPTION" /tmp/grub.new; + sed -i "/$LinuxIMG.*\//c\\\t$LinuxIMG\\t\/initrd.img" /tmp/grub.new; +} + +sed -i '$a\\n' /tmp/grub.new; +fi + +[[ "$inVNC" == 'n' ]] && { +GRUBPATCH='0'; + +if [[ "$loaderMode" == "0" ]]; then +[ -f '/etc/network/interfaces' -o -d '/etc/sysconfig/network-scripts' ] || { + echo "Error, Not found interfaces config."; + exit 1; +} + +sed -i ''${INSERTGRUB}'i\\n' $GRUBDIR/$GRUBFILE; +sed -i ''${INSERTGRUB}'r /tmp/grub.new' $GRUBDIR/$GRUBFILE; +[[ -f $GRUBDIR/grubenv ]] && sed -i 's/saved_entry/#saved_entry/g' $GRUBDIR/grubenv; +fi + +[[ -d /tmp/boot ]] && rm -rf /tmp/boot; +mkdir -p /tmp/boot; +cd /tmp/boot; +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then + COMPTYPE="gzip"; +elif [[ "$linux_relese" == 'centos' ]]; then + COMPTYPE="$(file /boot/initrd.img |grep -o ':.*compressed data' |cut -d' ' -f2 |sed -r 's/(.*)/\L\1/' |head -n1)" + [[ -z "$COMPTYPE" ]] && echo "Detect compressed type fail." 
&& exit 1; +fi +CompDected='0' +for ListCOMP in `echo -en 'gzip\nlzma\nxz'` + do + if [[ "$COMPTYPE" == "$ListCOMP" ]]; then + CompDected='1' + if [[ "$COMPTYPE" == 'gzip' ]]; then + NewIMG="initrd.img.gz" + else + NewIMG="initrd.img.$COMPTYPE" + fi + mv -f "/boot/initrd.img" "/tmp/$NewIMG" + break; + fi + done +[[ "$CompDected" != '1' ]] && echo "Detect compressed type not support." && exit 1; +[[ "$COMPTYPE" == 'lzma' ]] && UNCOMP='xz --format=lzma --decompress'; +[[ "$COMPTYPE" == 'xz' ]] && UNCOMP='xz --decompress'; +[[ "$COMPTYPE" == 'gzip' ]] && UNCOMP='gzip -d'; + +$UNCOMP < /tmp/$NewIMG | cpio --extract --verbose --make-directories --no-absolute-filenames >>/dev/null 2>&1 + +if [[ "$linux_relese" == 'debian' ]] || [[ "$linux_relese" == 'ubuntu' ]]; then +cat >/tmp/boot/preseed.cfg<>/dev/null 2>&1 +} + +[[ "$ddMode" == '1' ]] && { +WinNoDHCP(){ + echo -ne "for\0040\0057f\0040\0042tokens\00753\0052\0042\0040\0045\0045i\0040in\0040\0050\0047netsh\0040interface\0040show\0040interface\0040\0136\0174more\0040\00533\0040\0136\0174findstr\0040\0057I\0040\0057R\0040\0042本地\0056\0052\0040以太\0056\0052\0040Local\0056\0052\0040Ethernet\0042\0047\0051\0040do\0040\0050set\0040EthName\0075\0045\0045j\0051\r\nnetsh\0040\0055c\0040interface\0040ip\0040set\0040address\0040name\0075\0042\0045EthName\0045\0042\0040source\0075static\0040address\0075$IPv4\0040mask\0075$MASK\0040gateway\0075$GATE\r\nnetsh\0040\0055c\0040interface\0040ip\0040add\0040dnsservers\0040name\0075\0042\0045EthName\0045\0042\0040address\00758\00568\00568\00568\0040index\00751\0040validate\0075no\r\n\r\n" >>'/tmp/boot/net.tmp'; +} +WinRDP(){ + echo -ne 
"netsh\0040firewall\0040set\0040portopening\0040protocol\0075ALL\0040port\0075$WinRemote\0040name\0075RDP\0040mode\0075ENABLE\0040scope\0075ALL\0040profile\0075ALL\r\nnetsh\0040firewall\0040set\0040portopening\0040protocol\0075ALL\0040port\0075$WinRemote\0040name\0075RDP\0040mode\0075ENABLE\0040scope\0075ALL\0040profile\0075CURRENT\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Network\0134NewNetworkWindowOff\0042\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0042\0040\0057v\0040fDenyTSConnections\0040\0057t\0040reg\0137dword\0040\0057d\00400\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0134Wds\0134rdpwd\0134Tds\0134tcp\0042\0040\0057v\0040PortNumber\0040\0057t\0040reg\0137dword\0040\0057d\0040$WinRemote\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0134WinStations\0134RDP\0055Tcp\0042\0040\0057v\0040PortNumber\0040\0057t\0040reg\0137dword\0040\0057d\0040$WinRemote\0040\0057f\r\nreg\0040add\0040\0042HKLM\0134SYSTEM\0134CurrentControlSet\0134Control\0134Terminal\0040Server\0134WinStations\0134RDP\0055Tcp\0042\0040\0057v\0040UserAuthentication\0040\0057t\0040reg\0137dword\0040\0057d\00400\0040\0057f\r\nFOR\0040\0057F\0040\0042tokens\00752\0040delims\0075\0072\0042\0040\0045\0045i\0040in\0040\0050\0047SC\0040QUERYEX\0040TermService\0040\0136\0174FINDSTR\0040\0057I\0040\0042PID\0042\0047\0051\0040do\0040TASKKILL\0040\0057F\0040\0057PID\0040\0045\0045i\r\nFOR\0040\0057F\0040\0042tokens\00752\0040delims\0075\0072\0042\0040\0045\0045i\0040in\0040\0050\0047SC\0040QUERYEX\0040UmRdpService\0040\0136\0174FINDSTR\0040\0057I\0040\0042PID\0042\0047\0051\0040do\0040TASKKILL\0040\0057F\0040\0057PID\0040\0045\0045i\r\nSC\0040START\0040TermService\r\n\r\n" >>'/tmp/boot/net.tmp'; +} + echo -ne 
"\0100ECHO\0040OFF\r\n\r\ncd\0056\0076\0045WINDIR\0045\0134GetAdmin\r\nif\0040exist\0040\0045WINDIR\0045\0134GetAdmin\0040\0050del\0040\0057f\0040\0057q\0040\0042\0045WINDIR\0045\0134GetAdmin\0042\0051\0040else\0040\0050\r\necho\0040CreateObject\0136\0050\0042Shell\0056Application\0042\0136\0051\0056ShellExecute\0040\0042\0045\0176s0\0042\0054\0040\0042\0045\0052\0042\0054\0040\0042\0042\0054\0040\0042runas\0042\0054\00401\0040\0076\0076\0040\0042\0045temp\0045\0134Admin\0056vbs\0042\r\n\0042\0045temp\0045\0134Admin\0056vbs\0042\r\ndel\0040\0057f\0040\0057q\0040\0042\0045temp\0045\0134Admin\0056vbs\0042\r\nexit\0040\0057b\00402\0051\r\n\r\n" >'/tmp/boot/net.tmp'; + [[ "$setNet" == '1' ]] && WinNoDHCP; + [[ "$setNet" == '0' ]] && [[ "$AutoNet" == '0' ]] && WinNoDHCP; + [[ "$setRDP" == '1' ]] && [[ -n "$WinRemote" ]] && WinRDP + echo -ne "ECHO\0040SELECT\0040VOLUME\0075\0045\0045SystemDrive\0045\0045\0040\0076\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\nECHO\0040EXTEND\0040\0076\0076\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\nSTART\0040/WAIT\0040DISKPART\0040\0057S\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\nDEL\0040\0057f\0040\0057q\0040\0042\0045SystemDrive\0045\0134diskpart\0056extend\0042\r\n\r\n" >>'/tmp/boot/net.tmp'; + echo -ne "cd\0040\0057d\0040\0042\0045ProgramData\0045\0057Microsoft\0057Windows\0057Start\0040Menu\0057Programs\0057Startup\0042\r\ndel\0040\0057f\0040\0057q\0040net\0056bat\r\n\r\n\r\n" >>'/tmp/boot/net.tmp'; + iconv -f 'UTF-8' -t 'GBK' '/tmp/boot/net.tmp' -o '/tmp/boot/net.bat' + rm -rf '/tmp/boot/net.tmp' + echo "$DDURL" |grep -q '^https://' + [[ $? -eq '0' ]] && { + echo -ne '\nAdd ssl support...\n' + [[ -n $SSL_SUPPORT ]] && { + wget --no-check-certificate -qO- "$SSL_SUPPORT" |tar -x + [[ ! -f /tmp/boot/usr/bin/wget ]] && echo 'Error! SSL_SUPPORT.' 
&& exit 1; + sed -i 's/wget\ -qO-/\/usr\/bin\/wget\ --no-check-certificate\ --retry-connrefused\ --tries=7\ --continue\ -qO-/g' /tmp/boot/preseed.cfg + [[ $? -eq '0' ]] && echo -ne 'Success! \n\n' + } || { + echo -ne 'Not ssl support package! \n\n'; + exit 1; + } + } +} + +[[ "$ddMode" == '0' ]] && { + sed -i '/anna-install/d' /tmp/boot/preseed.cfg + sed -i 's/wget.*\/sbin\/reboot\;\ //g' /tmp/boot/preseed.cfg +} + +elif [[ "$linux_relese" == 'centos' ]]; then +cat >/tmp/boot/ks.cfg< /boot/initrd.img; +rm -rf /tmp/boot; +} + +[[ "$inVNC" == 'y' ]] && { + sed -i '$i\\n' $GRUBDIR/$GRUBFILE + sed -i '$r /tmp/grub.new' $GRUBDIR/$GRUBFILE + echo -e "\n\033[33m\033[04mIt will reboot! \nPlease connect VNC! \nSelect\033[0m\033[32m Install OS [$DIST $VER] \033[33m\033[4mto install system.\033[04m\n\n\033[31m\033[04mThere is some information for you.\nDO NOT CLOSE THE WINDOW! \033[0m\n" + echo -e "\033[35mIPv4\t\tNETMASK\t\tGATEWAY\033[0m" + echo -e "\033[36m\033[04m$IPv4\033[0m\t\033[36m\033[04m$MASK\033[0m\t\033[36m\033[04m$GATE\033[0m\n\n" + + read -n 1 -p "Press Enter to reboot..." 
INP + [[ "$INP" != '' ]] && echo -ne '\b \n\n'; +} + +chown root:root $GRUBDIR/$GRUBFILE +chmod 444 $GRUBDIR/$GRUBFILE + +if [[ "$loaderMode" == "0" ]]; then + sleep 3 && reboot >/dev/null 2>&1 +else + rm -rf "$HOME/loader" + mkdir -p "$HOME/loader" + cp -rf "/boot/initrd.img" "$HOME/loader/initrd.img" + cp -rf "/boot/vmlinuz" "$HOME/loader/vmlinuz" + [[ -f "/boot/initrd.img" ]] && rm -rf "/boot/initrd.img" + [[ -f "/boot/vmlinuz" ]] && rm -rf "/boot/vmlinuz" + echo && ls -AR1 "$HOME/loader" +fi diff --git a/linux/wget_udeb_amd64.tar.gz b/linux/wget_udeb_amd64.tar.gz new file mode 100644 index 0000000..b87af11 Binary files /dev/null and b/linux/wget_udeb_amd64.tar.gz differ diff --git "a/linux/\344\270\212\344\274\240\344\270\213\350\275\275\346\226\207\344\273\266.md" "b/linux/\344\270\212\344\274\240\344\270\213\350\275\275\346\226\207\344\273\266.md" new file mode 100644 index 0000000..277ade0 --- /dev/null +++ "b/linux/\344\270\212\344\274\240\344\270\213\350\275\275\346\226\207\344\273\266.md" @@ -0,0 +1,34 @@ +# 在linux中rz 和 sz 命令允许开发者与主机通过串口进行传递文件 + +>sudo yum install lrzsz -y + +### 上传文件 + +>rz + +### 下载文件 + +>sz [fileName] + + + + +## 方式二 自己编译安装 + +>wget https://static.ojbk.plus/soft/lrzsz-0.12.20.tar.gz + +>tar zxvf lrzsz-0.12.20 + +>cd lrzsz-0.12.20 + +>./configure + +>make + +>sudo make install + +>cd /usr/bin + +>sudo ln -s /usr/local/bin/lrz rz + +>sudo ln -s /usr/local/bin/lsz sz \ No newline at end of file diff --git "a/linux/\345\206\205\347\275\221\347\251\277\351\200\217\345\267\245\345\205\267.md" "b/linux/\345\206\205\347\275\221\347\251\277\351\200\217\345\267\245\345\205\267.md" new file mode 100644 index 0000000..44ea2f8 --- /dev/null +++ "b/linux/\345\206\205\347\275\221\347\251\277\351\200\217\345\267\245\345\205\267.md" @@ -0,0 +1,22 @@ +## 下载工具 + + + git clone https://github.com/open-dingtalk/pierced.git +#### 启动ngrok + 启动工具,执行命令“./ding -config=./ding.cfg -subdomain= 域名前缀 端口”,以 linux 为例: + + +>cd mac_64 + +. + +>chmod 755 ./ding + +. 
+ +>./ding -config=./ding.cfg -subdomain=ojbk 8080 + + +## win + +>ding.exe -config=ding.cfg -subdomain=ojbk 8080 diff --git "a/linux/\345\270\270\347\224\250\345\221\275\344\273\244\346\212\245\351\224\231.md" "b/linux/\345\270\270\347\224\250\345\221\275\344\273\244\346\212\245\351\224\231.md" new file mode 100644 index 0000000..e2f59a4 --- /dev/null +++ "b/linux/\345\270\270\347\224\250\345\221\275\344\273\244\346\212\245\351\224\231.md" @@ -0,0 +1,13 @@ +# bash:vi:command not find + + 这是由于系统 PATH 设置问题,PATH没有设置正确,系统就无法找到精确命令了。 + + + +>在命令行中输入:export PATH=/usr/bin:/usr/sbin:/bin:/sbin:/usr/X11R6/bin  这样可以保证命令行命令暂时可以使用 + +>在命令行中输入 vi ~/.bash_profile + +>仔细检查自己配置的 PATH 属性是否有错误,(可能是字母错误或者标点符号错误),改正后 :wq 报错退出。 + +>执行 source ~/.bash_profile 使配置生效即可 \ No newline at end of file diff --git "a/linux/\350\247\243\345\216\213\347\274\251/README.md" "b/linux/\350\247\243\345\216\213\347\274\251/README.md" new file mode 100644 index 0000000..beea8e5 --- /dev/null +++ "b/linux/\350\247\243\345\216\213\347\274\251/README.md" @@ -0,0 +1,55 @@ +# linux服务器上的压缩包的常用命令 + +.tar 解包 tar xvf filename.tar +.tar 打包 tar cvf filename.tar dirname + +.gz 解压1 gunzip filename.gz +.gz 解压2 gzip -d filename.gz +.gz 压缩 gzip filename + +.tar.gz 和 .tgz 解压 tar zxvf filename.tar.gz +.tar.gz 和 .tgz 压缩 tar zcvf filename.tar.gz dirname + +.bz2 解压1 bzip2 -d filename.bz2 +.bz2 解压2 bunzip2 filename.bz2 +.bz2 压缩 bzip2 -z filename + +.tar.bz2 解压 tar jxvf filename.tar.bz2 +.tar.bz2 压缩 tar jcvf filename.tar.bz2 dirname + +.bz 解压1 bzip2 -d filename.bz +.bz 解压2 bunzip2 filename.bz +.tar.bz 解压 tar jxvf filename.tar.bz + +.z 解压 uncompress filename.z +.z 压缩 compress filename + +.tar.z 解压 tar zxvf filename.tar.z +.tar.z 压缩 tar zcvf filename.tar.z dirname + +.zip 解压 unzip filename.zip +.zip 压缩 zip filename.zip dirname + +.rar 解压 rar x filename.rar +.rar 压缩 rar a filename.rar dirname + +lzop工具最适合在注重压缩速度的场合,压缩文件时会新建.lzo文件,而原文件保持不变(使用-U选项除外) + +lzop -v test 创建test.lzo压缩文件,输出详细信息,保留test文件不变 + +lzop -Uv test 
创建test.lzo压缩文件,输出详细信息,删除test文件 + +lzop -t test.lzo 测试test.lzo压缩文件的完整性 + +lzop –info test.lzo 列出test.lzo中各个文件的文件头 + +lzop -l test.lzo 列出test.lzo中各个文件的压缩信息 + +lzop –ls test.lzo 列出test.lzo文件的内容,同ls -l功能 + +cat test | lzop > t.lzo 压缩标准输入并定向到标准输出 + +lzop -dv test.lzo 解压test.lzo得到test文件,输出详细信息,保留test.lzo不变 + +注:lzop没有unlzop命令,只能加上-d选项解压,向lzop传入一组文件和目录名时,lzop会压缩所有文件但是会忽略目录, +压缩文件保留原来文件的权限设置和时间戳 \ No newline at end of file diff --git "a/linux/\350\265\213\344\272\210\347\211\271\345\256\232\347\224\250\346\210\267\347\211\271\345\256\232\346\226\207\344\273\266\345\244\271\347\232\204\350\257\273\345\206\231\346\235\203\351\231\220.md" "b/linux/\350\265\213\344\272\210\347\211\271\345\256\232\347\224\250\346\210\267\347\211\271\345\256\232\346\226\207\344\273\266\345\244\271\347\232\204\350\257\273\345\206\231\346\235\203\351\231\220.md" new file mode 100644 index 0000000..06283e1 --- /dev/null +++ "b/linux/\350\265\213\344\272\210\347\211\271\345\256\232\347\224\250\346\210\267\347\211\271\345\256\232\346\226\207\344\273\266\345\244\271\347\232\204\350\257\273\345\206\231\346\235\203\351\231\220.md" @@ -0,0 +1,10 @@ +setfacl -m u:username:rwx file 设定username对file拥有rwx权限 + + +setfacl -m g:group:rwz file 设定group 组成员对file拥有rwx权限 + + + +setfacl -x u:username file 从cal列表中删除username + +setfacl -b file 关闭file上的acl列表 \ No newline at end of file diff --git "a/linux/\351\230\262\347\201\253\345\242\231/centOS7.md" "b/linux/\351\230\262\347\201\253\345\242\231/centOS7.md" new file mode 100644 index 0000000..85fb8a7 --- /dev/null +++ "b/linux/\351\230\262\347\201\253\345\242\231/centOS7.md" @@ -0,0 +1,35 @@ +#开启关闭服务 + +### 开启 + +>service firewalld start + +### 重启 + +>service firewalld restart + +### 关闭 + +>service firewalld stop + +# 查看当前开放端口 + +>firewall-cmd --list-all + +# 开放233端口 + +>firewall-cmd --permanent --add-port=233/tcp + +# 移除端口 + +>firewall-cmd --permanent --remove-port=8080/tcp + +#重启防火墙(修改配置后要重启防火墙) + +>firewall-cmd --reload + +# 参数解释 + 
+1、firwall-cmd:是Linux提供的操作firewall的一个工具; +2、--permanent:表示设置为持久; +3、--add-port:标识添加的端口; \ No newline at end of file diff --git "a/linux/\351\231\220\345\210\266\350\277\233\347\250\213cpu\345\215\240\346\234\211\347\216\207/README.md" "b/linux/\351\231\220\345\210\266\350\277\233\347\250\213cpu\345\215\240\346\234\211\347\216\207/README.md" new file mode 100644 index 0000000..438fc1d --- /dev/null +++ "b/linux/\351\231\220\345\210\266\350\277\233\347\250\213cpu\345\215\240\346\234\211\347\216\207/README.md" @@ -0,0 +1,46 @@ +# 限制cpu占有率过高 + + 安装cpulimit + +## 安装 + +#### 方式一 + +>wget http://downloads.sourceforge.net/cpulimit/cpulimit-1.1.tar.gz + +>tar -zxvf cpulimit-1.1.tar.gz + +>cd cpulimit-1.1 + +>make + +>cp cpulimit /usr/local/sbin/ + +#### 方式二 + Debian / Ubuntu + +>sudo apt-get install cpulimit + + Centos + +>sudo yum install cpulimit + + +## 如何使用cpulimit? + +>cd /usr/local/sbin/ + + 例如限制ffmpeg cpu利用率 + +>sudo nohup ./cpulimit -e ffmpeg -l 65 1>/dev/null 2>&1 & + + 限制进程号30519的程序cpu使用率为75% + +>sudo nohup ./cpulimit -p 30519 -l 75 1>/dev/null 2>&1 & + + 可以使用绝对路径限制进程的cpu利用率 + +>cpulimit -P /usr/local/redis/bin/redis-server -l 50 + + +考察资料 :https://scoutapm.com/blog/restricting-process-cpu-usage-using-nice-cpulimit-and-cgroups diff --git "a/maven/\345\244\226\351\203\250\351\235\231\346\200\201\346\226\207\344\273\266.md" "b/maven/\345\244\226\351\203\250\351\235\231\346\200\201\346\226\207\344\273\266.md" new file mode 100644 index 0000000..ea3888f --- /dev/null +++ "b/maven/\345\244\226\351\203\250\351\235\231\346\200\201\346\226\207\344\273\266.md" @@ -0,0 +1 @@ +http://maven.apache.org/plugins/maven-war-plugin/examples/adding-filtering-webresources.html \ No newline at end of file diff --git "a/maven/\351\205\215\347\275\256gpg-key.md" "b/maven/\351\205\215\347\275\256gpg-key.md" new file mode 100644 index 0000000..a1ea6c6 --- /dev/null +++ "b/maven/\351\205\215\347\275\256gpg-key.md" @@ -0,0 +1,81 @@ +# 配置GPG-KEY + +``` + + + + org.apache.maven.plugins + 
maven-gpg-plugin + 1.5 + + + verify + + sign + + + + + +``` + + + + +下载gpg4win: + +https://files.gpg4win.org/gpg4win-3.1.15.exe + + + +>gpg --gen-key + +来生成密钥,需要输入名字和邮箱,并弹出一个对话框来输入密钥的保护密码,一定要记住,后面会用到 + +> gpg --list-keys + + +查看生成的密钥,得到类似如下的输出,敏感部分打了掩码,pub就是生成的公钥 + +>gpg --keyserver hkp://pool.sks-keyservers.net --send-keys 0276B66ACF24A*****D69225F2323 + +>gpg --keyserver hkp://pool.sks-keyservers.net --recv-keys 0276B66ACF24A*****D69225F2323 + + +POM + +``` + + + + + sonatype-nexus-snapshots + https://s01.oss.sonatype.org/content/repositories/snapshots/ + + + sonatype-nexus-releases + https://s01.oss.sonatype.org/content/repositories/releases/ + + + +``` + +settings.xml + +``` + + sonatype-nexus-releases + user + pass + + + + gpg.passphrase + 你的密钥 + + + +``` + + +>mvn clean deploy -P sonatype-oss-release diff --git a/minio/README.md b/minio/README.md new file mode 100644 index 0000000..0b980ff --- /dev/null +++ b/minio/README.md @@ -0,0 +1,131 @@ +# 注意事项 + + 可能不适合新手观看 哈哈 以下内容属于 随笔记录 + +## 无脑docker 版 + +docker run -itd -p 9000:9000 --name minio \ +-e "MINIO_ACCESS_KEY=ojbk" \ +-e "MINIO_SECRET_KEY=12345678" \ +-v ~/docker/minio/data:/data \ +-v ~/docker/minio/config:/root/.minio \ +minio/minio server /data + + +###### +export MINIO_ACCESS_KEY=admin +export MINIO_SECRET_KEY=12345678 + + +### 二进制go编译现成版 + +wget https://dl.min.io/server/minio/release/linux-amd64/minio + +chmod +x minio +./minio server /home/ojbk/data + +nohup ./minio server --address 0.0.0.0:33333 /home/ojbk/data > minio.log 2>&1 & + + + + +# 自己编译版 + +首先要有go 环境 + + 需要科学上网 + +wget https://dl.google.com/go/go1.14.5.linux-amd64.tar.gz + +root账户执行 普通账户注意权限问题(毕竟usr/local 这个目录普通用户是无法操作的) + +tar -C /usr/local -xzf go1.4.linux-amd64.tar.gz + +cd + +加入环境变量 +vi .profile + +export PATH=$PATH:/usr/local/go/bin + +保存 + + 当然你要去https://github.com/minio/minio 下一份go的源码 + + #set CGO_ENABLED=0 + set GOOS=linux + set GOARCH=amd64 + ### 毕竟go被墙了 + export GOPROXY=https://goproxy.io + go build main.go + + + +### mc 客户端 + + 
+ wget https://dl.min.io/client/mc/release/linux-amd64/mc + chmod +x mc + + ln -s /root/sf/mc /usr/bin/mc + + + + mc config host add minio http://ip:port QW544GDG67AHD XH584GHGSFGIK --api S3v4 + + + 配置策略命令查看: mc policy + + + 设置 bucket 匿名访问 即开放权限 (image 为bucket, 前面的minio 为配置文件中的别名) +具体位置 /root/sf/.mc/config.json + + mc policy set download minio/image + + +``` + + { + "version": "9", + "hosts": { + "gcs": { + "url": "https://storage.googleapis.com", + "accessKey": "YOUR-ACCESS-KEY-HERE", + "secretKey": "YOUR-SECRET-KEY-HERE", + "api": "S3v2", + "lookup": "dns" + }, + "local": { + "url": "http://localhost:33333", + "accessKey": "*****", + "secretKey": "*****", + "api": "S3v4", + "lookup": "auto" + }, + "minio": { + "url": "http://ip:port", + "accessKey": "QW544GDG67AHD", + "secretKey": "XH584GHGSFGIK", + "api": "S3v4", + "lookup": "auto" + }, + "play": { + "url": "https://play.min.io", + "accessKey": "Q3AM3UQ867SPQQA43P2F", + "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "api": "S3v4", + "lookup": "auto" + }, + "s3": { + "url": "https://s3.amazonaws.com", + "accessKey": "YOUR-ACCESS-KEY-HERE", + "secretKey": "YOUR-SECRET-KEY-HERE", + "api": "S3v4", + "lookup": "dns" + } + } + } + +``` + + diff --git a/minio/minio-java-sdk.md b/minio/minio-java-sdk.md new file mode 100644 index 0000000..7ac9301 --- /dev/null +++ b/minio/minio-java-sdk.md @@ -0,0 +1,66 @@ +# 1.依赖 + + + io.minio + minio + 7.1.0 + + + +# 2.示例 + + +``` + + import io.minio.*; + import io.minio.http.Method; + + import java.io.ByteArrayInputStream; + import java.util.concurrent.TimeUnit; + + public class MinioTest { + + public static void main(String[] args) throws Exception { + + MinioClient minioClient = MinioClient.builder() + .endpoint("http://106.52.14.247:33333") + .credentials("xhajshfjgha", "sdhjahwxcsd") + .build(); + + //检查 bucket 是否存在 不存在就创建 + boolean isExist = minioClient.bucketExists(BucketExistsArgs.builder().bucket("ojbk").build()); + if(isExist) { + System.out.println("Bucket 
already exists."); + } else { + minioClient.makeBucket(MakeBucketArgs.builder().bucket("ojbk").build()); + } + //文件上传 选择 ojbk 这个 bucket 将/home/wxm/webflux/webflux.log 这个文件 存储在minio服务器上 命名为ojbk.log + minioClient.uploadObject(UploadObjectArgs.builder().bucket("ojbk").object("ojbk.log").filename("/home/wxm/webflux/webflux.log").build()); + //minioClient.putObject(PutObjectArgs.builder().bucket("ojbk").object("ojbk.log").stream(inputStream, objectSize, partSize).build()); + //bucket 创建 plus 空文件夹 + //minioClient.putObject(PutObjectArgs.builder().bucket("ojbk").object("plus/").stream(new ByteArrayInputStream(new byte[] {}), 0, -1).build()); + // 获取我们 刚刚上传的文件的url + String url = minioClient.getPresignedObjectUrl(GetPresignedObjectUrlArgs.builder().method(Method.GET).expiry(1, TimeUnit.HOURS).bucket("ojbk").object("ojbk.log").build()); + + System.err.println(url); + //下载 minio 中的文件 ojbect 对应服务器中的文件名字 ,filename 对应 下载存储的路径和文件名 + minioClient.downloadObject(DownloadObjectArgs.builder().bucket("ojbk").object("ojbk.log").filename("/home/wxm/webflux/666.log").build()); + + //删除文件 + //minioClient.removeObject(RemoveObjectArgs.builder().bucket("ojbk").object("ojbk.log").build()); + + } + + } + +``` + +# 更多详情 + + + https://docs.min.io/docs/java-client-api-reference + + + http://minio.github.io/minio-java/io/minio/MinioClient.html + + diff --git "a/minio/minio\345\210\233\345\273\272\347\224\250\346\210\267\345\210\206\351\205\215\346\235\203\351\231\220.md" "b/minio/minio\345\210\233\345\273\272\347\224\250\346\210\267\345\210\206\351\205\215\346\235\203\351\231\220.md" new file mode 100644 index 0000000..184fe98 --- /dev/null +++ "b/minio/minio\345\210\233\345\273\272\347\224\250\346\210\267\345\210\206\351\205\215\346\235\203\351\231\220.md" @@ -0,0 +1,73 @@ +# 分配新用户并赋予权限 + + 查看目前 local 存在的策略 + +>mc admin policy list local + + 查看readwrite格式 + +>mc admin policy info local readwrite + + 可以仿照readwrite 创建一个新策略 + +> vi minio-test.json + + 注意 json中不能有注释 复制请删除 + +```aidl + +{ + "Version": 
"2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ // Action 表示(权限) + "s3:ListAllMyBuckets", // 查看所有的“桶”列表 + "s3:ListBucket", // 查看桶内的对象列表 + "s3:GetBucketLocation", + "s3:GetObject", // 下载对象 + "s3:PutObject", // 上传对象 + "s3:DeleteObject" // 删除对象 + ], + "Resource": [ + "arn:aws:s3:::*" // (应用到的资源, arn:aws:s3是命名空间 *表示所有) + // arn:aws:s3:::test/* 这样表示仅给予Bucket为test的权限 + ] + } + ] +} + + +``` + 压缩版 + +```aidl + +{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:ListAllMyBuckets","s3:ListBucket","s3:GetBucketLocation","s3:GetObject","s3:PutObject","s3:DeleteObject"],"Resource":["arn:aws:s3:::test/*"]}]} + +``` + + 将这个新建的test策略放进local策略 + +>mc admin policy add local test minio-test.json + + + 创建新用户 test 密码为test1234 + +>mc admin user add local test test1234 + + 为该test用户赋予test策略 + +> mc admin policy set local test user=test + + 禁用test用户 + +> mc admin user disable local test + + 删除test用户 + +>mc admin user remove local test + + 删除策略 + +>mc admin policy remove local test diff --git "a/minio/rclone\350\277\201\347\247\273minio\346\225\260\346\215\256.md" "b/minio/rclone\350\277\201\347\247\273minio\346\225\260\346\215\256.md" new file mode 100644 index 0000000..bb15db5 --- /dev/null +++ "b/minio/rclone\350\277\201\347\247\273minio\346\225\260\346\215\256.md" @@ -0,0 +1,51 @@ +# rclone 迁移minio数据 + + +两台机器的时区及时间要保持一致,最后进行迁移之前,两台机器的时间进行校准。 + +``` +centos 7设置时区 +timedatectl set-timezone Asia/Shanghai +#时间同步 +ntpdate ntp.aliyun.com +``` + +>curl https://rclone.org/install.sh | sudo bash + +rclone config +按照提示一步步,输入相应配置,即可。生成的文件位于:/root/.config/rclone目录下。 +也可以直接 编辑如下文件 + +> vi /root/.config/rclone/rclone.conf + +``` +[oldminio] +type = s3 +provider = Minio +env_auth = false +access_key_id = minio1 +secret_access_key = minio1@123 +region = cn-east-1 +endpoint = https://minio.exemple.com +location_constraint = +server_side_encryption = + +[newminio] +type = s3 +provider = Minio +env_auth = false +access_key_id = minio2 +secret_access_key = 
minio2@123 +region = cn-east-1 +endpoint = http://10.103.209.130:9000 +location_constraint = +server_side_encryption = + +``` + +>rclone sync -P oldminio:mybucket newminio:mybucket + + + 使用 sync 参数,同步有差异的数据 + -P 显示详细过程 + mybucket 桶名称,新minio没有则自动创建 diff --git a/mongo/mongo-GridFS.md b/mongo/mongo-GridFS.md new file mode 100644 index 0000000..dfb49e7 --- /dev/null +++ b/mongo/mongo-GridFS.md @@ -0,0 +1,161 @@ +# GridFS简介 + + GridFS是用于存储和检索超过16MB的BSON文档大小限制的文件的解决方案。 + + GridFS是MongoDB用来存储大型二进制文件的一种存储机制。 + + GridFS 不是将文件存储在单个文档中,而是将文件分为块,并将每个块作为单独的文档存储。 + 一般情况GridFS使用的块大小为256kb,最后一个块除外。 + + GridFS 使用两个集合存储文件,一个集合文件的块,另一个存储文件的原始数据。 + +# GridFS 优缺点 + +### 优点 + + 能够简化技术栈,如果已经使用了MongoDB,那么使用GridFS,就不需要其它独立的存储工具了 + + GridFS会自动平衡已有的复制,或者为MongoDB设置的自动分片,所以对文件存储做故障转移或者是横向扩展会更容易 。 + + GridFS的功能不错,能自动解决一些其他文件系统遇到的问题,如在同一个目录下存储大量的文件 + +### 缺点 + + 性能较低,不如直接访问文件系统快。 + + 无法修改文档。如果要修改GridFS里面的文档,只能是先删除再添加 + +## 要点 + + MongoDB 不会释放已经占用的硬盘空间。即使删除 db中的集合也不会释放磁盘空间。 + 因此 如果使用 GridFS 存储文件,从 GridFS 存储中删除无用的垃圾文件, + MongoDB 依然不会释放磁盘空间的。这会造成磁盘一直在消耗,而无法回收利用的问题。 + + 因此 这 几乎是这个 最大的弊端 所以很少人会用mongo 来存文件 + + +### 释放空间 + + 在mongo shell中运行 【二选一】 + + db.repairDatabase() + + 或者 + + db.runCommand({ repairDatabase: 1 }) + + 使用通过修复数据库方法回收磁盘时需要注意, + + 但修复磁盘的剩余空间必须大于等于存储数据集占用空间家伙加上 2G ,否则无法完成修复。 + + + +## 代码 + +``` + + import com.mongodb.MongoClient; + import com.mongodb.client.MongoDatabase; + import com.mongodb.client.gridfs.GridFSBucket; + import com.mongodb.client.gridfs.GridFSBuckets; + import org.springframework.beans.factory.annotation.Value; + import org.springframework.context.annotation.Bean; + import org.springframework.context.annotation.Configuration; + + @Configuration + public class MongoConfig { + @Value("${mongodb.database:ojbktest}") //这里是数据库的名称 + String db; + + @Bean + public GridFSBucket getGridFSBucket(MongoClient mongoClient) { + MongoDatabase database = mongoClient.getDatabase(db); + GridFSBucket bucket = GridFSBuckets.create(database); + return bucket; + } + } + +``` + + + + +``` + + 
import com.mongodb.client.gridfs.GridFSBucket; + import com.mongodb.client.gridfs.GridFSDownloadStream; + import com.mongodb.client.gridfs.model.GridFSFile; + import org.bson.types.ObjectId; + import org.junit.jupiter.api.Test; + import org.springframework.beans.factory.annotation.Autowired; + import org.springframework.boot.test.context.SpringBootTest; + import org.springframework.data.mongodb.core.query.Criteria; + import org.springframework.data.mongodb.core.query.Query; + import org.springframework.data.mongodb.gridfs.GridFsResource; + import org.springframework.data.mongodb.gridfs.GridFsTemplate; + + import java.io.*; + + @SpringBootTest + public class MongoApplicationTests { + + @Autowired + private GridFsTemplate gridFsTemplate; + + @Test + public void uploadFile() throws FileNotFoundException { + //要存储的文件 + File file = new File("D:\\ok.jpg"); + //定义输入流 + FileInputStream inputStram = new FileInputStream(file); + //向GridFS存储文件 + ObjectId objectId = gridFsTemplate.store(inputStram, "ok6.jpg"); + //得到文件ID + String fileId = objectId.toString(); + System.out.println(fileId); + } + + + @Autowired + private GridFSBucket gridFSBucket; + + @Test + public void downloadFile() throws IOException { + String fileId = "5f1fc01e6c97742656939265"; + //根据id查询文件 + GridFSFile gridFSFile = gridFsTemplate.findOne(Query.query(Criteria.where("_id").is(fileId))); + //打开下载流对象 + GridFSDownloadStream gridFSDownloadStream = gridFSBucket.openDownloadStream(gridFSFile.getObjectId()); + //创建gridFsResource,用于获取流对象 + GridFsResource gridFsResource = new GridFsResource(gridFSFile, gridFSDownloadStream); + //获取流中的数据 + InputStream inputStream = gridFsResource.getInputStream(); + File f1 = new File("D:\\usr\\ok.jpg"); + if (!f1.exists()) { + f1.getParentFile().mkdirs(); + } + byte[] bytes = new byte[1024]; + // 创建基于文件的输出流 + FileOutputStream fos = new FileOutputStream(f1); + int len = 0; + while ((len = inputStream.read(bytes)) != -1) { + fos.write(bytes, 0, len); + } + inputStream.close(); + 
fos.close(); + + } + + @Test + public void testDelFile() { + + String fileId = "5f1fc01e6c97742656939265"; + gridFsTemplate.delete(Query.query(Criteria.where("_id").is(fileId))); + + } + + + } + +``` + diff --git a/mongo/mongoBD/.gitignore b/mongo/mongoBD/.gitignore new file mode 100644 index 0000000..82eca33 --- /dev/null +++ b/mongo/mongoBD/.gitignore @@ -0,0 +1,25 @@ +/target/ +!.mvn/wrapper/maven-wrapper.jar + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/build/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ \ No newline at end of file diff --git a/mongo/mongoBD/.mvn/wrapper/maven-wrapper.jar b/mongo/mongoBD/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000..9cc84ea Binary files /dev/null and b/mongo/mongoBD/.mvn/wrapper/maven-wrapper.jar differ diff --git a/mongo/mongoBD/.mvn/wrapper/maven-wrapper.properties b/mongo/mongoBD/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000..b573bb5 --- /dev/null +++ b/mongo/mongoBD/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1 @@ +distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.5.3/apache-maven-3.5.3-bin.zip diff --git a/mongo/mongoBD/README.md b/mongo/mongoBD/README.md new file mode 100644 index 0000000..a591c1c --- /dev/null +++ b/mongo/mongoBD/README.md @@ -0,0 +1,20 @@ +# 项目 为 springboot 对mongodb 的一些简单操作 适合入门学习 + + +## 运行 MongoBdApplication.java +##打开浏览器 输入下面的 地址 + + + +# 新建一个wxm 用户 +http://127.0.0.1:1024/newuser +# 获取 wxm这个 用户 +http://127.0.0.1:1024/getwxm +# 获取所有用户 +http://127.0.0.1:1024/getuser +# 更新wxm 这个用户 +http://127.0.0.1:1024/updatewxm +# 删除 wxm +http://127.0.0.1:1024/delwxm + + 代码是写死的 ,在 MongoController.java 中可以修改下参数 可以自己 瞎几把玩。 diff --git a/mongo/mongoBD/mvnw b/mongo/mongoBD/mvnw new file mode 100644 index 0000000..5bf251c --- /dev/null +++ b/mongo/mongoBD/mvnw @@ -0,0 +1,225 @@ +#!/bin/sh +# 
---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven2 Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. 
+cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. + + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Migwn, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" + # TODO classpath? +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! 
`expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +echo $MAVEN_PROJECTBASEDIR +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if 
$cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/mongo/mongoBD/mvnw.cmd b/mongo/mongoBD/mvnw.cmd new file mode 100644 index 0000000..019bd74 --- /dev/null +++ b/mongo/mongoBD/mvnw.cmd @@ -0,0 +1,143 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. 
+@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven2 Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. 
+echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" + +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/mongo/mongoBD/pom.xml b/mongo/mongoBD/pom.xml new file mode 100644 index 0000000..2a21568 --- /dev/null +++ b/mongo/mongoBD/pom.xml @@ -0,0 +1,97 @@ + + + 4.0.0 + + ojbk + mongoBD + 1.0 + jar + + mongoBD + plus.ojbk + + + org.springframework.boot + spring-boot-starter-parent + 1.5.10.RELEASE + + + + + UTF-8 + UTF-8 + 1.8 + + + + + org.springframework.boot + spring-boot-starter-data-mongodb + + + + org.springframework.boot + spring-boot-starter-test + test + + + org.springframework.boot + spring-boot-starter-web + + + com.alibaba + fastjson + 1.2.83 + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + + + + spring-snapshots + Spring Snapshots + https://repo.spring.io/snapshot + + true + + + + spring-milestones + Spring Milestones + https://repo.spring.io/milestone + + false + + + + + + + spring-snapshots + Spring Snapshots + https://repo.spring.io/snapshot + + true + + + + spring-milestones 
+ Spring Milestones + https://repo.spring.io/milestone + + false + + + + + + diff --git a/mongo/mongoBD/src/main/java/plus/ojbk/MongoBdApplication.java b/mongo/mongoBD/src/main/java/plus/ojbk/MongoBdApplication.java new file mode 100644 index 0000000..fb6884a --- /dev/null +++ b/mongo/mongoBD/src/main/java/plus/ojbk/MongoBdApplication.java @@ -0,0 +1,12 @@ +package plus.ojbk; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication(scanBasePackages = {"plus.ojbk.mongo.*"}) +public class MongoBdApplication { + + public static void main(String[] args) { + SpringApplication.run(MongoBdApplication.class, args); + } +} diff --git a/mongo/mongoBD/src/main/java/plus/ojbk/mongo/controller/MongoController.java b/mongo/mongoBD/src/main/java/plus/ojbk/mongo/controller/MongoController.java new file mode 100644 index 0000000..b63c4d1 --- /dev/null +++ b/mongo/mongoBD/src/main/java/plus/ojbk/mongo/controller/MongoController.java @@ -0,0 +1,61 @@ +package plus.ojbk.mongo.controller; + +import java.util.List; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +import com.alibaba.fastjson.JSON; + +import plus.ojbk.mongo.entity.User; +import plus.ojbk.mongo.service.UserService; + +@RestController +public class MongoController { + + @Autowired + private UserService userService; + + @GetMapping("/newuser") + public Object a() { + User user = new User(); + user.setUserName("wxm"); + user.setEmail("i@ojbk.plus"); + user.setPassWord("12346"); + user.setPhoneNumber("17666666666"); + userService.createUser(user); + return "ojbk"; + } + + @GetMapping("/getuser") + public Object b() { + List user = userService.getAllUser(); + String str = JSON.toJSONString(user); + return str; + } + + @GetMapping("/getwxm") + public Object c() { + User user = 
userService.getUserByUserName("wxm"); + String json =JSON.toJSONString(user); + return json; + } + + @GetMapping("/updatewxm") + public Object d() { + User user = new User(); + user.setUserName("wxm"); + user.setPassWord("6666"); + user.setPhoneNumber("18000000000"); + long u = userService.updateUser(user); + return u; + } + + @GetMapping("/delwxm") + public Object e() { + User user = userService.getUserByUserName("wxm"); + long num = userService.deleteUserById(user.getId()); + return num; + } +} diff --git a/mongo/mongoBD/src/main/java/plus/ojbk/mongo/entity/User.java b/mongo/mongoBD/src/main/java/plus/ojbk/mongo/entity/User.java new file mode 100644 index 0000000..d465290 --- /dev/null +++ b/mongo/mongoBD/src/main/java/plus/ojbk/mongo/entity/User.java @@ -0,0 +1,202 @@ +package plus.ojbk.mongo.entity; + +import java.io.Serializable; +import java.util.Date; + +import org.springframework.data.annotation.Id; +import org.springframework.data.mongodb.core.mapping.Document; +import org.springframework.data.mongodb.core.mapping.Field; + +@Document(collection = "ojbk_user") +public class User implements Serializable { + private static final long serialVersionUID = 1L; + + @Id + private String id; // 用户编号 + + @Field(value = "name") + private String userName; // 用户名 + + @Field(value = "password") + private String passWord; // 密码 + + @Field(value = "phone") + private String phoneNumber; // 电话号码 + + @Field(value = "email") + private String email; // 邮箱 + + @Field(value = "modify_date") + private Date modifyDate; // 修改时间 + + @Field(value = "modify_user") + private String modifyUser; // 修改人 + + @Field(value = "add_date") + private Date addDate; // 添加时间 + + @Field(value = "add_user") + private String addUser; // 添加人 + + @Field(value = "enabled") + private Integer enabled; // 是否处于可用状态 + + @Field(value = "logo") + private String logo; // 头像 23333 + + /** + * @return the id + */ + public String getId() { + return id; + } + + /** + * @param id the id to set + */ + public void 
setId(String id) { + this.id = id; + } + + /** + * @return the userName + */ + public String getUserName() { + return userName; + } + + /** + * @param userName the userName to set + */ + public void setUserName(String userName) { + this.userName = userName; + } + + /** + * @return the passWord + */ + public String getPassWord() { + return passWord; + } + + /** + * @param passWord the passWord to set + */ + public void setPassWord(String passWord) { + this.passWord = passWord; + } + + /** + * @return the phoneNumber + */ + public String getPhoneNumber() { + return phoneNumber; + } + + /** + * @param phoneNumber the phoneNumber to set + */ + public void setPhoneNumber(String phoneNumber) { + this.phoneNumber = phoneNumber; + } + + /** + * @return the email + */ + public String getEmail() { + return email; + } + + /** + * @param email the email to set + */ + public void setEmail(String email) { + this.email = email; + } + + /** + * @return the modifyDate + */ + public Date getModifyDate() { + return modifyDate; + } + + /** + * @param modifyDate the modifyDate to set + */ + public void setModifyDate(Date modifyDate) { + this.modifyDate = modifyDate; + } + + /** + * @return the modifyUser + */ + public String getModifyUser() { + return modifyUser; + } + + /** + * @param modifyUser the modifyUser to set + */ + public void setModifyUser(String modifyUser) { + this.modifyUser = modifyUser; + } + + /** + * @return the addDate + */ + public Date getAddDate() { + return addDate; + } + + /** + * @param addDate the addDate to set + */ + public void setAddDate(Date addDate) { + this.addDate = addDate; + } + + /** + * @return the addUser + */ + public String getAddUser() { + return addUser; + } + + /** + * @param addUser the addUser to set + */ + public void setAddUser(String addUser) { + this.addUser = addUser; + } + + /** + * @return the enabled + */ + public Integer getEnabled() { + return enabled; + } + + /** + * @param enabled the enabled to set + */ + public void 
setEnabled(Integer enabled) { + this.enabled = enabled; + } + + /** + * @return the logo + */ + public String getLogo() { + return logo; + } + + /** + * @param logo the logo to set + */ + public void setLogo(String logo) { + this.logo = logo; + } + + +} diff --git a/mongo/mongoBD/src/main/java/plus/ojbk/mongo/service/UserService.java b/mongo/mongoBD/src/main/java/plus/ojbk/mongo/service/UserService.java new file mode 100644 index 0000000..d3d0bd3 --- /dev/null +++ b/mongo/mongoBD/src/main/java/plus/ojbk/mongo/service/UserService.java @@ -0,0 +1,148 @@ +package plus.ojbk.mongo.service; + +import java.util.Date; +import java.util.List; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.domain.Sort; +import org.springframework.data.domain.Sort.Direction; +import org.springframework.data.mongodb.core.MongoTemplate; +import org.springframework.data.mongodb.core.query.Criteria; +import org.springframework.data.mongodb.core.query.Query; +import static org.springframework.data.mongodb.core.query.Criteria.where; +import static org.springframework.data.mongodb.core.query.Query.query; +import org.springframework.data.mongodb.core.query.Update; +import org.springframework.stereotype.Service; +import org.springframework.util.DigestUtils; + +import plus.ojbk.mongo.entity.User; + +/** + * + * @author 王小明 + * 示例代码 偷懒啦 + */ + +@Service +public class UserService { + + @Autowired + private MongoTemplate mongoTemplate; + + /** + * 查询用户 + */ + public User getUserByUserName(String username) { + Query query = Query.query(Criteria.where("name").is(username)); + return this.mongoTemplate.findOne(query, User.class); + } + + /** + * 创建用户 + */ + public void createUser(User u) { + User user = new User(); + user.setUserName(u.getUserName()); + user.setPassWord(DigestUtils.md5DigestAsHex(DigestUtils.md5DigestAsHex(u.getPassWord().getBytes()).getBytes())); + user.setPhoneNumber(u.getPhoneNumber()); + user.setEmail(u.getEmail()); + 
user.setAddDate(new Date()); + user.setEnabled(u.getEnabled());; + this.mongoTemplate.save(user); + } + + /** + * 编辑 用户 + */ + public long updateUser(User user) { + Query query = new Query(Criteria.where("name").is(user.getUserName())); //正常应该用 _id 来进行编辑用户这里偷懒了 + Update update = new Update(); + if (user.getEmail() != null) { + update.set("email", user.getEmail()); + } + if (user.getPassWord() != null) { + update.set("password", DigestUtils.md5DigestAsHex(DigestUtils.md5DigestAsHex(user.getPassWord().getBytes()).getBytes())); + } + if (user.getEnabled() != null) { + update.set("phone", user.getPhoneNumber()); + } + if (user.getEnabled() != null) { + update.set("enabled", user.getEnabled()); + } + update.set("modify_date", new Date()); + update.set("modify_user", "admin"); + return this.mongoTemplate.updateFirst(query, update, User.class).getN(); //getN()获取执行条数 + } + + /** + * 删除用户 + */ + public long deleteUserById(String id) { + Query query = new Query(Criteria.where("_id").is(id)); + return this.mongoTemplate.remove(query, User.class).getN(); + } + + /** + * 查询全部用户 + */ + public List getAllUser() { + Query query = new Query(); + return this.mongoTemplate.find(query, User.class); + } + + /** + * 这里使用静态导包 + * import static org.springframework.data.mongodb.core.query.Criteria.where; + * import static org.springframework.data.mongodb.core.query.Query.query; + * + * 一下示例 给出 查询 num大于 某一个值的 全部用户 + * gt 相当于 〉 + * gte 相当于 〉= + * lt 相当于 〈 + * lte 相当于 〈 = + * is 相当于 == + * ne 相当于 != + * in 相当于 sql中的in + * nin 相当于 not in + * orOperator 接受多个条件,组成or逻辑 + */ + public List getUser(Integer num) { + Criteria criteria = where("num").gt(num); + Query query = query(criteria); + // 上面两句可以简写 成下面这句 + //Query query = query(where("num").gt(num)); + return this.mongoTemplate.find(query, User.class); + } + + /** + * 模糊查询 (通过邮箱模糊查询用户) + * 这里是 示例仅仅是个小栗子qwq + */ + public List getUserByEmailKey(String key){ + Query query = query(where("email").regex(".*"+key+".*")); + return 
this.mongoTemplate.find(query, User.class); + } + + /** + * 倒叙查询 加入 query.with(new Sort(Direction.DESC, "add_date")); + */ + public List getSortUserByEmailKey(String key){ + Query query = query(where("email").regex(".*"+key+".*")); + query.with(new Sort(Direction.DESC, "add_date")); + return this.mongoTemplate.find(query, User.class); + } + + /** + * 分页查询 加入 + * query.skip(0); 从第几条开始 【版本不同 有的版本是 表示第几页】 + * query.limit(10); 每页显示的条数 + * + */ + public List getSortUserByEmailKeys(String key){ + Query query = query(where("email").regex(".*"+key+".*")); + query.skip(0); + query.limit(10); + query.with(new Sort(Direction.DESC, "add_date")); + return this.mongoTemplate.find(query, User.class); + } +} diff --git a/mongo/mongoBD/src/main/resources/application.properties b/mongo/mongoBD/src/main/resources/application.properties new file mode 100644 index 0000000..6a93233 --- /dev/null +++ b/mongo/mongoBD/src/main/resources/application.properties @@ -0,0 +1,9 @@ +# server +server.port=1024 +spring.http.encoding.charset=UTF-8 +spring.http.encoding.enabled=false +# datasource +spring.data.mongodb.uri=mongodb://ojbkman:veryojbk@140.***.***.83:27017/ojbk + +# log +logging.config=classpath:logback-config.xml diff --git a/mongo/mongoBD/src/main/resources/logback-config.xml b/mongo/mongoBD/src/main/resources/logback-config.xml new file mode 100644 index 0000000..637a62c --- /dev/null +++ b/mongo/mongoBD/src/main/resources/logback-config.xml @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + ${PATTERN-2} + UTF-8 + + System.out + + + + + + \ No newline at end of file diff --git a/mongo/mongoBD/src/test/java/plus/ojbk/MongoBdApplicationTests.java b/mongo/mongoBD/src/test/java/plus/ojbk/MongoBdApplicationTests.java new file mode 100644 index 0000000..b551a4f --- /dev/null +++ b/mongo/mongoBD/src/test/java/plus/ojbk/MongoBdApplicationTests.java @@ -0,0 +1,16 @@ +package plus.ojbk; + +import org.junit.Test; +import org.junit.runner.RunWith; +import 
org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class MongoBdApplicationTests { + + @Test + public void contextLoads() { + } + +} diff --git "a/mongo/mongo\345\217\257\350\247\206\345\214\226\345\267\245\345\205\267\344\275\277\347\224\250.md" "b/mongo/mongo\345\217\257\350\247\206\345\214\226\345\267\245\345\205\267\344\275\277\347\224\250.md" new file mode 100644 index 0000000..a195ca6 --- /dev/null +++ "b/mongo/mongo\345\217\257\350\247\206\345\214\226\345\267\245\345\205\267\344\275\277\347\224\250.md" @@ -0,0 +1,32 @@ +---------------------------- + mongo - 可视化工具的使用 +---------------------------- + +1.工具下载地址 + + 【https://download.robomongo.org/1.2.1/windows/robo3t-1.2.1-windows-x86_64-3e50a65.exe】 + 【https://download.robomongo.org/1.2.1/windows/robo3t-1.2.1-windows-x86_64-3e50a65.zip】 + + 原先叫Robomongo 貌似最近改名 了? 喵喵喵? 变成了Robo 3T + + + +2. 我们下载免安装版 zip + + 下载完 解压 进入目录 点击 robo3t.exe + + 点击我同意的radio ,信息啥省略 然后一路下一步就完事了。 + + +3. 图片教程 +![image](https://github.com/xx13295/wxm/blob/master/images/robo1.png?raw=true) +![image](https://github.com/xx13295/wxm/blob/master/images/robo2.png?raw=true) +![image](https://github.com/xx13295/wxm/blob/master/images/robo3.png?raw=true) +![image](https://github.com/xx13295/wxm/blob/master/images/robo4.png?raw=true) +![image](https://github.com/xx13295/wxm/blob/master/images/robo5.png?raw=true) + + +---------------------------- + 当然 工具有很多 选择适合自己的 就行啦 +---------------------------- + diff --git "a/mongo/mongo\345\256\211\350\243\205\345\217\212\344\275\277\347\224\250.md" "b/mongo/mongo\345\256\211\350\243\205\345\217\212\344\275\277\347\224\250.md" new file mode 100644 index 0000000..1c9d162 --- /dev/null +++ "b/mongo/mongo\345\256\211\350\243\205\345\217\212\344\275\277\347\224\250.md" @@ -0,0 +1,238 @@ +---------------------------- + mongo - 安装 及使用教程 | +---------------------------- + +1. 
下载 源码 【 * https://www.mongodb.com/download-center?jmp=nav#community】 可从官网时刻获取最新版 + + wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel62-3.6.5.tgz + +2. 解压源码包 + + tar zxvf mongodb-linux-x86_64-rhel62-3.6.5.tgz + +3. 重命名 进入 mongodb 并创建 数据目录 和 日志目录 + + mv mongodb-linux-x86_64-rhel62-3.6.5 mongodb + + cd mongodb + + mkdir db + + mkdir logs + +4. 进入 mongodb 中 bin 目录创建 mongo的配置文件 + + cd bin + + vi mongodb.conf + +5. 在 mongodb.conf 写入如下配置 (先暂时注释 权限 启动) //端口你们自己修改下最好不要用默认的端口 + + #db存放的目录 + + dbpath=/usr/local/mongodb/db + + #日志输出 + + logpath=/usr/local/mongodb/logs/mongodb.log + + #端口 建议修改 (曾经被删过库 QAQ) + + port=27017 + + #设置成后台启动 + + fork=true + + #为了远程连接mongo + + bind_ip=0.0.0.0 + + #权限启动 需要认证 + + #auth=true + + + **** 保存 ESC :wq + +6. 将配置好的 mongo 复制 到 /usr/local/ 目录下 + + 因为我们现在 在mongodb的bin 目录中 所以 使用两次 cd .. 命令 + + cd .. + + cd .. + + 开始复制 + + sudo cp -R mongodb /usr/local/ + +7. 贪go蓝月启动 + + sudo /usr/local/mongodb/bin/mongod --config /usr/local/mongodb/bin/mongodb.conf + + +8. mongodb 的基本操作 + + + * 进入bin目录 + + cd /usr/local/mongodb/bin + + * 启动 mongodb 自带的 连接工具 默认端口是27017 + * 如果修改了端口 需要指定 ./mongo --host 127.0.0.1 --port ${你的端口} + + 教程中使用的 是27017 为默认的 。 所以 在bin目录中 直接使用 ./mongo 连接 + + ./mongo + + * '>' 这个符号 不要输入啊。 为了演示 连接状态 233333 + + 在admin数据库中创建一个 管理员 账户 + > use admin + > db.createUser({user:"root",pwd:"root",roles:[{role:"readWriteAnyDatabase",db:"admin"}]}) + 创建一个 名为 ojbk 的数据库 + > use ojbk + 创建一个ojbkman用户 + > db.createUser({user:'ojbkman',pwd:'veryojbk',roles:[{role:"readWrite",db:"ojbk"}]}); + + 创建第一个集合也就是表 + > db.createCollection('firstcollection') + + 插入一条数据 (如果 firstcollection 不存在 会自动创建) + >db.firstcollection.insert({"msg":"ojbk.plus","code":"666"}) + + + /** + * + * 嗯? 
好像忘记了 一件事 , 因为我们已经建好 用户了。 刚才 在mongo的配置文件中 是注释了一个 auth=true 的 + * 所以 现在我们要 去掉它前面的 # 重启一下mongo。 + * + * 这时候你先退出 > exit + * + * sudo vi /usr/local/mongodb/bin/mongodb.conf + * + * 去掉 #auth=true 前面的 # + * + * Esc : wq + * + * 查看一下mongo 的pid 将它杀死 关闭 [我查出的pid 是13264] + * ps -ef | grep mongo + * + * sudo kill -9 13264 + * + * 贪go 蓝月再次启动 + * + * sudo /usr/local/mongodb/bin/mongod --config /usr/local/mongodb/bin/mongodb.conf + * + */ + + + 再次 使用 ./mongo 连接 + + > use admin + + > db.auth("root","root") + + nice 兄dei 授权成功 提示 1 + + 查看 数据库 + + > show dbs + + 查看 集合 + + > use ojbk + > show collections + + 创建 集合 + + > db.test.insert({"msg":"ojbk","code":"999"}) + + 查询 //一条就findOne() + + > db.getCollection('test').find({}) + + 条件查询 + + > db.getCollection('test').find({"msg":"ojbk"}) + + + 修改 + + > db.test.update({"msg":"ojbk"},{$set:{"msg":"update"}},true,false) + + **别 误会 啊 上面的 更新语句 说的是 将msg为ojbk的这条记录修改成 update 不是说要把原来的值写出来 ! + + > db.test.update({"code":"999"},{$set:{"msg":"ppp"}},true,false) + + + 删除 + + > db.test.remove({"code":"999"}) + + 如果不指定条件 会删除全部 数据 .... + + > db.test.remove({}) + + 删库 跑路 + + >db.dropDatabase() + + 嗯 权限不够呢, 权限不够我还不能 删你库了? 
+ >exit + sudo rm -rf /* + + + + +---------------------------- + update - 详细教程 | +---------------------------- + + db.collection.update($(查询的条件),$(更新操作符及新的值),$(upsert),$(multi) ) + + upsert: 若为true 则如果更新的记录不存在就插入,默认为false 不插入。 + multi:若为true 则根据条件查询出来的记录全部更新,默认是false 只更新找到的第一条记录。 + + 首先创建 一个 boy集合并初始化数据 + db.boy.insert({"name":"xiaoming"}) + + + 更新操作符: + 1.$inc + 用法:{$inc:{field:value}} + 作用:对一个数字字段的某个field增加value + 示例:db.boy.update({name:"xiaoming"},{$inc:{age:5}}) + + 2.$set + 用法:{$set:{field:value}} + 作用:把文档中某个字段field的值设为value + 示例:db.boy.update({name:"xiaoming"},{$set:{age:23}}) + + 3.$unset + 用法:{$unset:{field:1}} + 作用:删除某个字段field + 示例: db.boy.update({name:"xiaoming"},{$unset:{age:1}}) + + 4.$push + 用法:{$push:{field:value}} + 作用:把value追加到field里。注:field只能是数组类型,如果field不存在,会自动插入一个数组类型 + 示例:db.boy.update({name:"xiaoming"},{$push:{"like":"pornhub"}}) + + 5.$rename + 用法:{$rename:{old_name:new_name}} + 作用:对字段进行重命名(不是值,是字段) + 示例:db.boy.update({name:"xiaoming"},{$rename:{"name":"username"}}) + + + 用户权限说明 + + readAnyDatabase 任何数据库的只读权限(和read相似) + + readWriteAnyDatabase 任何数据库的读写权限(和readWrite相似) + + userAdminAnyDatabase 任何数据库用户的管理权限(和userAdmin相似) + + dbAdminAnyDatabase 任何数据库的管理权限(dbAdmin相似) + diff --git "a/mongo/mongo\347\264\242\345\274\225.md" "b/mongo/mongo\347\264\242\345\274\225.md" new file mode 100644 index 0000000..f73c882 --- /dev/null +++ "b/mongo/mongo\347\264\242\345\274\225.md" @@ -0,0 +1,317 @@ +---------------------------- + mongo - 索引使用教程 | +---------------------------- + +### mongo索引的使用 + + 我们知道常用的mysql等数据库有索引,那mongo是否也有索引呢? 
+ + 答案是肯定的。 + +### 建立索引 + + +假如我们有如下数据: + + { + "_id" : ObjectId("5dc375c0acbff831b828a906"), + "_class" : "plus.ojbk.web.controller.test", + "title" : "adidas 阿迪 T 恤 10010-20", + "catalog" : "运动", + "brand" : "阿迪达斯", + "price" : 199, + "specs" : [ + { + "name" : "材质", + "value" : "涤纶" + }, + { + "name" : "上市", + "value" : "2019 秋季" + } + ] + } + + + + +>db.collection.createIndex(key, options) + + +>db.getCollection('test_a').createIndex({"specs":NumberInt(1)},{"name":"test.specs","background":true}) + + test_a --> 为集合名称 ,也就是表。 + + {"specs":NumberInt(1)} --> specs 为需要建立索引的key名, vlaue 可以是 NumberInt(1) 或NumberInt(-1) , 1表示升序,-1降序。 + + {"name":"test.specs","background":true} --> test.specs 为当前索引的别名, background值为true 代表后台建立这个索引。 + + +##### options 可选参数 + +| 参数 |类型 | 描述 | +| -------- | -------- | -------- | +|name |String |索引名称,默认是:字段名_排序类型 开始排序 | +|background |boolean |创建索引在后台运行,不会阻止其他对数据库操作 | +|unique |boolean |创建唯一索引,不会出现重复的文档值 | +|sparse |boolean |过滤掉null,不存在的字段 | +|expireAfterSeconds|Integer|指定一个以秒为单位的数值,完成 TTL设定,设定集合的生存时间| +|v |Index version|索引的版本号。默认的索引版本取决于mongod创建索引时运行的版本| +|weights|document |索引权重值,数值在 1 到 99,999 之间,表示该索引相对于其他索引字段的得分权重。| +|default_language |String |对于文本索引,该参数决定了停用词及词干和词器的规则的列表。 默认为英语| +|language_override |String |对于文本索引,该参数指定了包含在文档中的字段名,语言覆盖默认的language,默认值为 language.| + + + +### 查询集合索引以及索引大小 + +>db.collection.getIndexes() + +>db.getCollection('test_a').getIndexes() + + [ + { + "v" : 1, + "key" : { + "_id" : 1 + }, + "name" : "_id_", + "ns" : "ojbktest.test_a" + }, + { + "v" : 1, + "key" : { + "title" : 1.0 + }, + "name" : "test.title", + "ns" : "ojbktest.test_a", + "background" : true + } + ] + + 一个是主键id 索引,另一个是我们手动创建的title索引 + + + + + 查看索引大小 + +>db.getCollection('test_a').totalIndexSize() + +### 删除索引 + + 删除指定的索引 + +>db.collection.dropIndex(indexName) + + +>db.getCollection('test_a').dropIndex("test.title") + + + 删除 除了_id_ 以外所有其他手动创建的索引。 + +>db.collection.dropIndexes() + +>db.getCollection('test_a').dropIndexes() + +### 查看查询语句是否使用索引 + 
+>db.collection.find().explain() + +>db.getCollection('test_a').find({catalog:"运动"}).explain() + + + { + "queryPlanner" : { + "plannerVersion" : 1, + "namespace" : "ojbktest.test_a", + "indexFilterSet" : false, + "parsedQuery" : { + "catalog" : { + "$eq" : "运动" + } + }, + "winningPlan" : { + "stage" : "FETCH", + "inputStage" : { + "stage" : "IXSCAN", + "keyPattern" : { + "catalog" : 1 + }, + "indexName" : "test.catalog", + "isMultiKey" : false, + "isUnique" : false, + "isSparse" : false, + "isPartial" : false, + "indexVersion" : 1, + "direction" : "forward", + "indexBounds" : { + "catalog" : [ + "[\"运动\", \"运动\"]" + ] + } + } + }, + "rejectedPlans" : [] + }, + "serverInfo" : { + "host" : "ojbk", + "port" : 50168, + "version" : "3.2.4", + "gitVersion" : "e2ee9ffcf9f5a94fad76802e28cc978718bb7a30" + }, + "ok" : 1.0 + } + + +由上面的结果可知 是用到了索引 "test.catalog" + +#### stage 对照表 + +| 参数 | 描述 | +| -------- | -------- | +|COLLSCAN | 全表扫描 | +|IXSCAN |扫描索引 | +|FETCH |根据索引去检索指定document | +|SHARD_MERGE |将各个分片返回数据进行merge | +|SORT |表明在内存中进行了排序 | +|LIMIT |使用limit限制返回数 | +|SKIP |使用skip进行跳过 | +|IDHACK |针对_id进行查询 | +|SHARDING_FILTER |通过mongos对分片数据进行查询 | +|COUNT |利用db.coll.explain().count()之类进行count运算 | +|COUNTSCAN |count不使用Index进行count时的stage返回 | +|COUNT_SCAN |count使用了Index进行count时的stage返回 | +|SUBPLA |未使用到索引的$or查询的stage返回 | +|TEXT |使用全文索引进行查询时候的stage返回 | +|PROJECTION |限定返回字段时候stage的返回 | + + +##### 期望查询使用到索引的stage结果 + + Fetch+IDHACK + + Fetch+ixscan + + Limit+(Fetch+ixscan) + + PROJECTION+ixscan + + SHARDING_FITER+ixscan + + COUNT_SCAN + +##### 不期望的结果 + + COLLSCAN(全表扫描), + SORT(使用sort但是无index), + 不合理的SKIP, + SUBPLA(未用到index的$or), + COUNTSCAN(不使用index进行count) + + + +#### explain() 还可以设置参数 + + executionStats + + allPlansExecution + + +>db.getCollection('test_a').find({catalog:"运动"}).explain("executionStats") + + { + "queryPlanner" : { + "plannerVersion" : 1, + "namespace" : "ojbktest.test_a", + "indexFilterSet" : false, + "parsedQuery" : { + "catalog" : { + "$eq" : "运动" + } + }, + 
"winningPlan" : { + "stage" : "FETCH", + "inputStage" : { + "stage" : "IXSCAN", + "keyPattern" : { + "catalog" : 1 + }, + "indexName" : "test.catalog", + "isMultiKey" : false, + "isUnique" : false, + "isSparse" : false, + "isPartial" : false, + "indexVersion" : 1, + "direction" : "forward", + "indexBounds" : { + "catalog" : [ + "[\"运动\", \"运动\"]" + ] + } + } + }, + "rejectedPlans" : [] + }, + "executionStats" : { + "executionSuccess" : true, + "nReturned" : 1, + "executionTimeMillis" : 0, + "totalKeysExamined" : 1, + "totalDocsExamined" : 1, + "executionStages" : { + "stage" : "FETCH", + "nReturned" : 1, + "executionTimeMillisEstimate" : 0, + "works" : 2, + "advanced" : 1, + "needTime" : 0, + "needYield" : 0, + "saveState" : 0, + "restoreState" : 0, + "isEOF" : 1, + "invalidates" : 0, + "docsExamined" : 1, + "alreadyHasObj" : 0, + "inputStage" : { + "stage" : "IXSCAN", + "nReturned" : 1, + "executionTimeMillisEstimate" : 0, + "works" : 2, + "advanced" : 1, + "needTime" : 0, + "needYield" : 0, + "saveState" : 0, + "restoreState" : 0, + "isEOF" : 1, + "invalidates" : 0, + "keyPattern" : { + "catalog" : 1 + }, + "indexName" : "test.catalog", + "isMultiKey" : false, + "isUnique" : false, + "isSparse" : false, + "isPartial" : false, + "indexVersion" : 1, + "direction" : "forward", + "indexBounds" : { + "catalog" : [ + "[\"运动\", \"运动\"]" + ] + }, + "keysExamined" : 1, + "dupsTested" : 0, + "dupsDropped" : 0, + "seenInvalidated" : 0 + } + } + }, + "serverInfo" : { + "host" : "ojbk", + "port" : 50168, + "version" : "3.2.4", + "gitVersion" : "e2ee9ffcf9f5a94fad76802e28cc978718bb7a30" + }, + "ok" : 1.0 + } diff --git "a/mongo/\345\233\272\345\256\232\351\233\206\345\220\210\345\244\247\345\260\217.md" "b/mongo/\345\233\272\345\256\232\351\233\206\345\220\210\345\244\247\345\260\217.md" new file mode 100644 index 0000000..a8bdce9 --- /dev/null +++ "b/mongo/\345\233\272\345\256\232\351\233\206\345\220\210\345\244\247\345\260\217.md" @@ -0,0 +1,55 @@ +# MongoDB 固定集合(Capped 
Collections) + + MongoDB 固定集合(Capped Collections)是性能更出色且有固定大小的集合, + 对于大小固定,我们可以想象其就像一个环形队列,当集合空间用完后,在插入的元素就会覆盖最初始的头部的元素! + +# 创建固定集合 + 我们用过 createCollection 来创建一个固定集合,且 capped 选项设置为 true: + +>db.createCollection("cappedLogCollection",{capped:true,size:10000}) + + 还可以指定文档个数,加上max:1000属性: + +>db.createCollection("cappedLogCollection",{capped:true,size:10000,max:1000}) + + 判断集合是否为固定集合: + +>db.cappedLogCollection.isCapped() + + + 如果需要将已存在的集合转换为固定集合可以使用以下命令: + +>db.runCommand({"convertToCapped":"posts",size:10000}) + + + 以上代码将我们已存在的 posts 集合转换为固定集合。 + +# 固定集合查询 + + 固定集合文档按照插入顺序存储的,默认情况下查询就是按照插入顺序返回的,也可以使用$natural 调整返回顺序。 + +>db.cappedLogCollection.find().sort({$natural:-1}) + + 固定集合的功能特点 + 可以插入即更新,但更新不能超出 collection 的大小,否则更新失败,不允许删除, + 但是可以调用 drop()删除集合中的所有行,但是 drop 后需要显式的重建集合。 + + 在 32 位 机子上一个 capped collection 的最大值约为 482.5M,64位 上只受系统文件大小的限制。 + +# 固定集合属性及用法 + + 属性1:对固定集合进行插入速度极快 + 属性2:按照插入顺序的查询输出速度极快 + 属性3:能够在插入最新数据时,淘汰最早的数据 + + 用法1:储存日志信息。 + 用法2:缓存一些少量的文档。 + +>db.createCollection("cappedLogCollection",{capped:true,size:10000,max:1000}) + + size:是整个集合空间大小,单位为 KB + max:是集合文档个数上限,单位是 个 + + 如果空间大小到达上限,则插入下一则文档时,会覆盖第一个文档;如果文档个数到达上限, + 永远插入下一个文档时,会覆盖第一个文档。两个单数上限判断取的是 与 的逻辑。 + \ No newline at end of file diff --git a/mysql/README.md b/mysql/README.md new file mode 100644 index 0000000..e63a653 --- /dev/null +++ b/mysql/README.md @@ -0,0 +1,99 @@ +# Mysql 8 安装 + +### Mysql 8 下载地址 https://dev.mysql.com/downloads/repo/yum/ + + //下载 网络不通 请在 windows上 下载后 用rz 上传 + wget https://dev.mysql.com/get/mysql80-community-release-el7-1.noarch.rpm + + //安装 yum repo文件并更新 yum 缓存 + rpm -ivh mysql80-community-release-el7-1.noarch.rpm + + yum clean all + + yum makecache + + // 安装 + yum install mysql-community-server + + //启动前 + vi /etc/my.cnf + + 将#default-authentication-plugin=mysql_native_password 前面的# 去掉 保存 + + //启动 + systemctl start mysqld.service + + (查看 systemctl status mysqld ) + +### 配置账户 + + //查看root 用户 初始密码 + cat /var/log/mysqld.log | grep password + + //下面是打印结果 + 
+>2018-07-31T11:12:14.924719Z 5 [Note] MY-010454 [Server] A temporary password is generated for root@localhost: eoQA=_Qyj9=M + + //可以看出 密码为 eoQA=_Qyj9=M + //好了 我们用这个密码 登录 root 用户 并修改 他的初始密码 + + mysql -u root -p + + ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY 'Mysql8ojbk666!'; + + 注意你的密码复杂度要够! 不然修改不成功,大小写加数字加字符什么的完事了! + + 创建普通用户 + CREATE USER 'ojbk'@'%' IDENTIFIED BY 'Ojbkmima666!'; + + //创建一个名为 test 数据库 并分配给这个普通用户 + + CREATE DATABASE test; + GRANT SELECT,INSERT,UPDATE,DELETE ON test.* TO 'ojbk'@'%'; + + //如果要 指定字符集 更多字符集 自己替换 不加则自动 创建 字符集utf8mb4 排序规则utf8mb4_0900_ai_ci + CREATE DATABASE test DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci; + + + //修改 可以用 navicat 可以连接的密码方式 也可以在 创建的时候 直接 加上 [ WITH mysql_native_password ] + ALTER USER 'ojbk'@'%' IDENTIFIED WITH mysql_native_password BY 'Ojbkmima666!'; + + 【 + (如果要增加权限) + + //all:所有权限,这里有select,update等等权限,需要什么自己 加就完事了熬~ + //后面的*.*:指定数据库.指定表,这里是所有 ,具体哪个库自己指定就完事了熬 ~ + // to 后面就是你刚才创建的用户及连接域 + + GRANT all ON *.* TO 'ojbk'@'%'; + 用以上命令授权的用户不能给其它用户授权,如果想让该用户可以授权,用以下命令: + GRANT all ON databasename.tablename TO 'username'@'host' WITH GRANT OPTION; + 】 + +### 常用命令 + + + //登录mysql + mysql -u username -p + + //退出mysql + quit + + //启动mysql + systemctl start mysqld.service + + //关闭 + systemctl stop mysqld.service + + //重启 + systemctl restart mysqld.service + + //开机自启 + systemctl enable mysqld.service + + //查看mysql版本 + mysql -V + 连上了MySQL服务器就 select version(); + + //在执行命令时候出现警告 可查看警告 + show warnings; \ No newline at end of file diff --git "a/mysql/\347\273\204\345\220\210\347\264\242\345\274\225\345\210\233\345\273\272\350\246\201\347\202\271.md" "b/mysql/\347\273\204\345\220\210\347\264\242\345\274\225\345\210\233\345\273\272\350\246\201\347\202\271.md" new file mode 100644 index 0000000..d7b9fd2 --- /dev/null +++ "b/mysql/\347\273\204\345\220\210\347\264\242\345\274\225\345\210\233\345\273\272\350\246\201\347\202\271.md" @@ -0,0 +1,8 @@ +# 组合索引 创建要点 + + 组合索引中有多个字段,其中一个字段是有范围的查询,应将此字段放在最后面。 + + + + + \ 
No newline at end of file diff --git a/nginx/README.md b/nginx/README.md new file mode 100644 index 0000000..dcf364f --- /dev/null +++ b/nginx/README.md @@ -0,0 +1,49 @@ +# Nginx 安装 + +#### 安装前的小准备 + yum -y install gcc gcc-c++ autoconf automake + yum -y install zlib zlib-devel openssl openssl-devel pcre-devel + +#### nginx 下载地址 http://nginx.org/en/download.html + //下载 网络不通 请在 windows上 下载后 用rz 上传 + wget http://nginx.org/download/nginx-1.13.9.tar.gz + //解压 + tar zxvf nginx-1.13.9.tar.gz + //进入nginx目录 + cd nginx-1.13.9 + //编译 安装 + ./configure --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module + make + sudo make install + + //软链接 这样不管在哪里都可以直接使用 nginx命令不需要进入 /usr/local/nginx/sbin目录 + sudo ln -s /usr/local/nginx/sbin/nginx /usr/bin/nginx + +# 常用命令 + + sudo nginx 启动 + + sudo nginx -s reload 配置文件变化后重新加载配置文件并重启nginx服务 + + sudo nginx -s stop 停止 + + sudo nginx -v 显示nginx的版本号 + sudo nginx -V 显示nginx的版本号和编译信息 + sudo nginx -t 检查nginx配置文件的正确性 + sudo nginx -T 检查nginx配置文件的正确定及配置文件的详细配置内容 + + +### 开启 http2 需要从新增加编译参数 + + ./configure --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module --with-http_ssl_module --with-threads --with-http_gzip_static_module --with-http_sub_module --with-http_v2_module + + make + + sudo make install + + + 然后在nginx 的配置 文件 + 改为 listen 443 ssl http2; + + 重启 ojbk + \ No newline at end of file diff --git "a/nginx/nginx\345\261\217\350\224\275ip.md" "b/nginx/nginx\345\261\217\350\224\275ip.md" new file mode 100644 index 0000000..c64ac8b --- /dev/null +++ "b/nginx/nginx\345\261\217\350\224\275ip.md" @@ -0,0 +1,63 @@ + +# Nginx-屏蔽指定IP + +>cd usr/local/nginx/conf + +>mkdir deny + +>cd deny + +>vi ip.conf + +>deny 192.168.1.1; + +>Esc ---》 :wq + + + 这样就是对 192.168.1.1 这个ip 的禁止访问 + 再有ip 直接修改这个ip.conf 加入新的黑名单ip 就行啦。 + + +|示例|作用| +|:-|:-| +|deny 192.168.1.1;|#过滤单个IP| +|deny 192.168.1.0/24;|#过滤整个地址段| +|deny all; |#过滤所有IP| +|allow 192.168.1.1; | #与deny all;组合是指除192.168.1.1外其他都过滤| + + + +# 当然 需要 进入nginx.conf 中 
include 一下这个配置 才会生效 + +>cd /usr/local/nginx/conf + +>vi nginx.conf + +加入以下这句 即可 + +>include usr/local/nginx/conf/deny/ip.conf; + +>Esc ---》 :wq + + 可以放到http, server, location, limit_except语句块 + + 一般放到 http{}标签末尾 + http{ + .....省略 + include /deny/ip.conf; + } + +然后重启 就ojbk + +对了。被禁止的ip会显示 403 + +可以在server{} 中加入 + + error_page 403 /403.html; + location = /403.html { + root html; + allow all; + } + +然后在 /usr/local/nginx/html 中 mkdir 一个403.html 页面 就会跳转到这里啦 + diff --git "a/nginx/nginx\345\274\200\345\220\257Gzip\345\216\213\347\274\251.md" "b/nginx/nginx\345\274\200\345\220\257Gzip\345\216\213\347\274\251.md" new file mode 100644 index 0000000..f510f4f --- /dev/null +++ "b/nginx/nginx\345\274\200\345\220\257Gzip\345\216\213\347\274\251.md" @@ -0,0 +1,47 @@ +# Nginx-Gzip压缩 + + 这个模块支持在线实时压缩输出数据流。 + 经过良好的配置优化,可以大幅的提升网站的输出效率。 + gzip是常见的一个压缩算法,是大部分浏览器都支持的算法 + 从HTTP请求头中可以看到浏览器支持的具体压缩算法 + 比较小的文件不要压缩,特别是二进制就根本别压缩了 + + +# Nginx-开启压缩 + + 常规配置 + +1. gzip on|off #是否开启gzip +2. gzip_buffers 32 4K| 16 8K #缓冲(压缩在内存中缓冲几块? 每块多大?) +3. gzip_comp_level [1-9] #推荐6 压缩级别(级别越高,压的越小,越浪费CPU计算资源) +4. gzip_disable #正则匹配UA 什么样的Uri不进行gzip +5. gzip_min_length 200 # 开始压缩的最小长度(再小就不要压缩了,意义不在) +6. gzip_http_version 1.0|1.1 # 开始压缩的http协议版本(可以不设置,目前几乎全是1.1协议) +7. gzip_proxied # 设置请求者代理服务器,该如何缓存内容 +8. gzip_types text/plain application/xml # 对哪些类型的文件用压缩 如txt,xml,html ,css +9. 
gzip_vary on|off # 是否传输gzip压缩标志 + + + +## 食用方法 + +>vi nginx.conf + + 找到 一个被注释的 # gzip on; 将它前面的# 去掉 + + 然后如下配置 + +>gzip on; + +>gzip_buffers 32 4k; + +>gzip_comp_level 6; + +>gzip_min_length 100; + +>gzip_types text/css text/xml text/html application/x-javascript; + +>Esc -----> :wq + + +>sudo nginx -s reload \ No newline at end of file diff --git "a/nginx/nginx\346\267\273\345\212\240service nginx restart\347\255\211\345\221\275\344\273\244.md" "b/nginx/nginx\346\267\273\345\212\240service nginx restart\347\255\211\345\221\275\344\273\244.md" new file mode 100644 index 0000000..6f59b5d --- /dev/null +++ "b/nginx/nginx\346\267\273\345\212\240service nginx restart\347\255\211\345\221\275\344\273\244.md" @@ -0,0 +1,119 @@ +## 增加 nginx 命令 + + 请使用 root 用户 或者获取到root 权限 + +>cd /etc/init.d/ +>vi nginx + + 然后 复制下面 一大溜 保存 + + 记得 使用 `chmod 755 nginx` + +>chkconfig --add nginx + + #!/bin/sh + # + # nginx - this script starts and stops the nginx daemin + # + # chkconfig: - 85 15 + # description: Nginx is an HTTP(S) server, HTTP(S) reverse \ + # proxy and IMAP/POP3 proxy server + # processname: nginx + # config: /usr/local/nginx/conf/nginx.conf + # pidfile: /usr/local/nginx/logs/nginx.pid + + # Source function library. + . /etc/rc.d/init.d/functions + + # Source networking configuration. + . /etc/sysconfig/network + + # Check that networking is up. + [ "$NETWORKING" = "no" ] && exit 0 + + nginx="/usr/local/nginx/sbin/nginx" + prog=$(basename $nginx) + + NGINX_CONF_FILE="/usr/local/nginx/conf/nginx.conf" + + lockfile=/var/lock/subsys/nginx + + start() { + [ -x $nginx ] || exit 5 + [ -f $NGINX_CONF_FILE ] || exit 6 + echo -n $"Starting $prog: " + daemon $nginx -c $NGINX_CONF_FILE + retval=$? + echo + [ $retval -eq 0 ] && touch $lockfile + return $retval + } + + stop() { + echo -n $"Stopping $prog: " + killproc $prog -QUIT + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval + } + + restart() { + configtest || return $? 
+ stop + start + } + + reload() { + configtest || return $? + echo -n $"Reloading $prog: " + killproc $nginx -HUP + RETVAL=$? + echo + } + + force_reload() { + restart + } + + configtest() { + $nginx -t -c $NGINX_CONF_FILE + } + + rh_status() { + status $prog + } + + rh_status_q() { + rh_status >/dev/null 2>&1 + } + + case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart|configtest) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}" + exit 2 + esac diff --git "a/nginx/nginx\351\200\217\344\274\240\345\272\224\347\224\250\350\207\252\345\256\232\344\271\211server\345\244\264.md" "b/nginx/nginx\351\200\217\344\274\240\345\272\224\347\224\250\350\207\252\345\256\232\344\271\211server\345\244\264.md" new file mode 100644 index 0000000..b0710eb --- /dev/null +++ "b/nginx/nginx\351\200\217\344\274\240\345\272\224\347\224\250\350\207\252\345\256\232\344\271\211server\345\244\264.md" @@ -0,0 +1,38 @@ +# nginx透传应用自定义server头 + +#### 举例 : + + Springboot应用 + +在配置文件中加入 +>server.server-header=PHP/9.9.8 +> +> + + +nginx 配置 增加 `proxy_pass_header Server;` 即可 +``` +server { + listen 8089; + server_name localhost; + + location / { + proxy_set_header HOST $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + proxy_pass_header Server; + + proxy_pass http://127.0.0.1:8080; + proxy_connect_timeout 600; + proxy_read_timeout 600; + } + } + + +``` + +这样别人使用f12查看到的 server 就不再是nginx了 而是php + +毕竟 php 是世界上最好的语言~ diff --git "a/nginx/nginx\351\205\215\347\275\256https.md" "b/nginx/nginx\351\205\215\347\275\256https.md" new file mode 100644 index 0000000..b9a380b --- /dev/null +++ 
"b/nginx/nginx\351\205\215\347\275\256https.md" @@ -0,0 +1,40 @@ +### nginx 配置https + + server { + listen 443; + server_name ojbk.plus www.ojbk.plus; + + ssl on; + ssl_certificate /usr/local/ssl/ojbk.plus/ojbk.plus.pem; + ssl_certificate_key /usr/local/ssl/ojbk.plus/ojbk.plus.key; + + access_log logs/ojbk.access.log main; + error_log logs/ojbk.error.log; + + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Requested-For $remote_addr; + proxy_set_header REMOTE-HOST $remote_addr; + + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + + + location / { + proxy_pass http://127.0.0.1:8080; + proxy_connect_timeout 600; + proxy_read_timeout 600; + } + + } + + server { + listen 80; + server_name ojbk.plus www.ojbk.plus; + return 301 https://ojbk.plus$request_uri; + + + } \ No newline at end of file diff --git "a/nginx/nginx\351\235\231\346\200\201\350\265\204\346\272\220\345\212\240\350\275\275\345\244\261\350\264\245\351\227\256\351\242\230.md" "b/nginx/nginx\351\235\231\346\200\201\350\265\204\346\272\220\345\212\240\350\275\275\345\244\261\350\264\245\351\227\256\351\242\230.md" new file mode 100644 index 0000000..d83c228 --- /dev/null +++ "b/nginx/nginx\351\235\231\346\200\201\350\265\204\346\272\220\345\212\240\350\275\275\345\244\261\350\264\245\351\227\256\351\242\230.md" @@ -0,0 +1,37 @@ +# nginx 配置 导致静态资源加载失败 + +### 情景 +![image](https://github.com/xx13295/wxm/blob/master/images/nginx/nginx1.png?raw=true) + + 网站 使用了 nginx 做代理服务器,有时候访问页面的时候 会出现静态资源读取失败,导致网页样式乱七八糟 + + QAQ 这样 很 丑有没有 + + 再次刷新 会发现网页又正常访问了 + + 浏览网页时出现 ERR_CONTENT_LENGTH_MISMATCH + + 莫慌 这是由于nginx 对资源文件做了缓存处理 存在 nginx安装目录中 一个 proxy_temp 目录下 + + + +### 我们可以查看 nginx 错误 日志文件 如下图所示 +![image](https://github.com/xx13295/wxm/blob/master/images/nginx/nginx2.png?raw=true) + + 从图中 可 发现 权限不足问题 导致 本次访问失败 + 因为 大部分 
nginx 配置 基本是默认的 + 所以 在nginx.conf 中 第二行 worker_processes 的前面一行 是这样的 #user nobody; + + +# 解决办法 + + 1. 首先 将 # 号去掉 将nobody 改为root 于是变成 user root; 按 Esc --> : wq + 2. cd 到 local 文件夹下 ,因为我的 nginx 在这个文件夹下 + 3. [blog@ojbk local]$ sudo chown -R root:root nginx + 4. [blog@ojbk local]$ sudo chmod -R 755 nginx + 5. [blog@ojbk local]$ sudo chmod -R u+s nginx + 6. [blog@ojbk conf]$ sudo nginx -s reload + + +理论上不可能还有问题了 如果有 那么就 sudo rm -rf /* ![image](https://github.com/xx13295/wxm/blob/master/images/o.png?raw=true) + \ No newline at end of file diff --git "a/nginx/nginx\351\235\231\346\200\201\350\265\204\346\272\220\345\242\236\345\212\240\350\256\277\351\227\256\346\216\247\345\210\266.md" "b/nginx/nginx\351\235\231\346\200\201\350\265\204\346\272\220\345\242\236\345\212\240\350\256\277\351\227\256\346\216\247\345\210\266.md" new file mode 100644 index 0000000..61dfec6 --- /dev/null +++ "b/nginx/nginx\351\235\231\346\200\201\350\265\204\346\272\220\345\242\236\345\212\240\350\256\277\351\227\256\346\216\247\345\210\266.md" @@ -0,0 +1,57 @@ +# 增加访问控制 + + 通常部分小姐姐的资源放到服务器直接爆乳在公网不太合适 + 因此要是能加上账户密码访问就非常ojbk了。 + +# 静态资源如下配置 + + 监听一个23333端口 + localhost 可以改成你的静态资源访问域名 如static.xxx.com + 则浏览器访问就是 static.xxx.com:23333 + 如果下面直接监听80端口 就不需要增加额外的 :80 了 + + + + + server{ + listen 23333; + server_name localhost; + location / { + + auth_basic "ojbk"; #标识 + auth_basic_user_file /usr/local/nginx/conf/htpasswd; #用户名密码配置文件 + + root /usr/local/static; + autoindex on; # 开启索引 + charset utf-8; # 解决文件名称中文乱码的问题 + autoindex_exact_size on; # 显示文件大小 + autoindex_localtime on; # 显示最后修改时间 + } + } + + + +## 生成密码文件 + + +>cd /usr/local/nginx/conf + + 使用openssl生成密码 + +>openssl passwd ojbk + + 得到密文密码 + +>mhbz9pgglXQT6 + + 配置用户密码文件 + +>echo "admin:mhbz9pgglXQT6" > htpasswd + + +### 重新读取nginx 配置文件 + +>sudo nginx -s reload + + 此时 再次访问 static.xxx.com:23333 + 就需要输入账户名admin 密码ojbk 才能访问到 相应的静态资源了 diff --git "a/nginx/nginx\351\252\232\346\223\215\344\275\234.md" "b/nginx/nginx\351\252\232\346\223\215\344\275\234.md" new 
file mode 100644 index 0000000..bd87334 --- /dev/null +++ "b/nginx/nginx\351\252\232\346\223\215\344\275\234.md" @@ -0,0 +1,56 @@ +# 搞事搞事搞事: + 如今我们喜欢网上冲浪,要学会隐藏自己 ,使用nginx做代理服务器,我们访问网页按F12在network中可以查看相应的资源的response返回的header中存在 server:nginx/版本号 + 这个东西被黑客看到的话,基本上可以根据指定的版本寻找攻击了.所以我们要隐藏版本号或者返回一个自定义的server给客户端达到隐藏自己的效果. + + + +## 隐藏版本号: + 这个比较简单 +>vi nginx.conf + + 在nginx的 http{} 里加上 server_tokens off; + + http { + #……省略配置 + server_tokens off; #即可隐藏版本号 + #……省略配置 + } + + + 重启nginx 这时我们再看 Server 发现已经没有版本信息了,而且所有的错误页面 都只有nginx 没有显示多余的版本号 + + +## 自定义的server 骚操作 + + 如果你已经编译安装过了 请先删除掉 因为重新编译 如果文件已经存在 貌似是不会覆盖安装的。 + 这个坑已经帮你踩过啦。 + 我装这个地方 /usr/local/nginx ,于是 我将这个nginx 删除了。 当然删除前 你先保存一下你 的配置文件 !! 这很重要不然 一会你还得重新配。 + +### 修改 src/http/ngx_http_header_filter_module.c + + static char ngx_http_server_string[] = "Server: nginx" CRLF; + + 把其中的nginx改成 自己喜欢的都可以 我改成了 ojbk . 于是就变成了Server:ojbk. 这里配合 上面的隐藏版本号食用 + + +### 修改 src/core/nginx.h + + #define NGINX_VERSION "1.13.9" + #define NGINX_VER "nginx/" NGINX_VERSION + #define NGINX_VAR "NGINX" + + 我们把NGINX_VERSION大小定义为6.6.6 + nginx与NGINX改为 OJBK + + 保存一下 。然后 cd .. 再cd .. 
从新编译安装一下 再把之前的配置文件拷贝进去 启动nginx 就可以 愉快的网上冲浪了。 + + +### 噢对了 这个时候 还得修改一下 /usr/local/nginx/html 中 html 文件 自定义下 错误文件路径 达到完美的效果 + + +## 已知问题, 可能由于我的https配置问题?用了https 再 server_tokens off;隐藏版本号 后就失效 + + 所以 不隐藏就好了。反正错误页面也可以自定义, 版本也是你 自定义的,相当于隐藏了。 + + 自定义错误可以 参考 nginx屏蔽ip.md 中介绍 + \ No newline at end of file diff --git "a/nginx/ssl\350\257\201\344\271\246.md" "b/nginx/ssl\350\257\201\344\271\246.md" new file mode 100644 index 0000000..6efb4c1 --- /dev/null +++ "b/nginx/ssl\350\257\201\344\271\246.md" @@ -0,0 +1,149 @@ +###Let's Encrypt 宣布 ACME v2 正式支持通配符证书。Let's Encrypt 宣称将继续清除 Web 上采用 HTTPS 的障碍,让每个网站轻松获取管理证书。 + +#### acme.sh +[传送门1](https://github.com/Neilpang/acme.sh/wiki/%E8%AF%B4%E6%98%8E/) + + +#### 参考 该博主 +[传送门2](https://my.oschina.net/kimver/blog/1634575/) + + +### 执行以下命令 + + curl https://get.acme.sh | sh + +my@example.com为你的邮箱 + +>curl https://get.acme.sh | sh -s email=my@example.com + +刷新环境变量 + +>source ~/.bashrc + + +或者从github安装 + +>wget -O - https://raw.githubusercontent.com/acmesh-official/acme.sh/master/acme.sh | sh -s -- --install-online -m my@example.com + +#### 导入阿里云后台的密钥 [获取key](https://ak-console.aliyun.com/#/accesskey/) + + export Ali_Key="dsadhasdkjaskdjsds" + export Ali_Secret="dsfhsfsfksfsldfsfds" + +#### 填写自己的域名生成证书 + acme.sh --issue --dns dns_ali -d ojbk.plus -d *.ojbk.plus + +#### 在证书生成目录执行 +``` + +acme.sh --installcert -d ojbk.plus -d *.ojbk.plus \ + --keypath /usr/local/ssl/ojbk/ojbk.plus.key \ + --fullchainpath /usr/local/ssl/ojbk/ojbk.plus.pem + +``` + +如果有配置nginx脚本可以自动重载ssl + +``` +acme.sh --installcert -d ojbk.plus -d *.ojbk.plus \ + --keypath /usr/local/ssl/ojbk/ojbk.plus.key \ + --fullchainpath /usr/local/ssl/ojbk/ojbk.plus.pem \ + --reloadcmd "service nginx force-reload" +``` + + + + + + + //这样就会把key和pem生成到指定的目录 + + 当然 首先 你先 进入 到 usr/local/ 使用 mkdir 命令创建文件夹 + 创建 ssl 文件 夹、再 进入ssl文件夹中 创建ojbk 文件夹 + 注意 将 权限修改一下 + 修改文件的所有者和组 【组:用户】 + chown -R ojbk:ojbk ssl + + 或者使用 sudo setfacl -m u:ojbk:rwx -R ssl/ + 设定ojbk用户对ssl 文件夹拥有rwx权限 + + + +## 好了 
,这样就拥有了免费的ssl 证书 三个月一次 他会自动 帮你重新 更新证书的 + + +## 2020 / 02 /10 证书无法更新问题 + + 需要 更新一下 脚本 不然一直卡在 Getting domain auth token for each domain + +> /usr/local/acme.sh/acme.sh --upgrade + + 然后重新执行 更新脚本的命令 + +# 2021 / 11 / 23 证书由原来的Let’s Encrypt变成了ZeroSSL + + 由于证书过期了 本能的使用了上面的更新脚本命令 -upgrade + 更新后发现 acme 默认证书已经换成了 ZeroSSL + + 根据提示直接 输入以下命令 + acme.sh --register-account -m myemail@example.com --server zerossl + + myemail@example.com 为你邮箱 随便填都行。 + + 然后重复执行一遍上文的开头的命令即可。 + + +继续用原来的配置的方法 + +配置文件里删掉最后一行的 + +Le_API='https://acme.zerossl.com/v2/DV90' + +再执行 +>acme.sh --set-default-ca --server letsencrypt + + +### 自签名证书 + +1.创建私钥 + +>openssl genrsa -out ojbk.key 1024 + +回车之后就可以得到一个私钥ojbk.key + +2.创建证书签名请求 + +>openssl req -new -key ojbk.key -out ojbk.csr + +回车之后会有一堆等着你输入的东西,直接一路回车。唯独一个Common Name要填成对应网站的IP或者域名: + +Common Name (e.g. server FQDN or YOUR name) []:192.168.0.15 + +上面我直接填了机器的IP:192.168.0.15 + +3.创建自签名证书 + +>openssl x509 -req -in ojbk.csr -signkey ojbk.key -out ojbk.crt + + +回车后会得到一个自签名证书ojbk.crt + +上面的有效期只有1个月 加上 -days 3650 整到10年 + +>openssl x509 -req -in ojbk.csr -out ojbk.crt -signkey ojbk.key -days 3650 + + +使用 +``` + + http { + ... + server { + listen 443 ssl; + ssl_certificate /usr/local/ssl/ojbk.crt; + ssl_certificate_key /usr/local/ssl/ojbk.key; + ... 
+ } + } + +``` diff --git "a/nginx/\345\237\237\345\220\215\351\207\215\345\256\232\345\220\221\345\217\212\345\274\200\345\220\257\347\233\256\345\275\225\346\265\217\350\247\210.md" "b/nginx/\345\237\237\345\220\215\351\207\215\345\256\232\345\220\221\345\217\212\345\274\200\345\220\257\347\233\256\345\275\225\346\265\217\350\247\210.md" new file mode 100644 index 0000000..a5ae62f --- /dev/null +++ "b/nginx/\345\237\237\345\220\215\351\207\215\345\256\232\345\220\221\345\217\212\345\274\200\345\220\257\347\233\256\345\275\225\346\265\217\350\247\210.md" @@ -0,0 +1,147 @@ + +# Nginx-静态文件 + + server{ + listen 80; + server_name static.ojbk.plus; + location / { + root /usr/local/static; + autoindex on; # 开启索引 + charset utf-8; # 解决文件名称中文乱码的问题 + autoindex_exact_size on; # 显示文件大小 + autoindex_localtime on; # 显示最后修改时间 + } + } + + + + + + + +# Nginx-域名重定向 + + + + 把 ojbk.plus 和 www.ojbk.plus 重定向到 www.baidu.com + server{ + listen 80; + server_name www.ojbk.plus ojbk.plus; + rewrite ^/(.*)$ http://www.baidu.com/$1 permanent; + } + + + + + +# Nginx-地址重写 + + Rewriter主要的功能就是实现URL重写,Nginx的Rewriter规则才用(Perl)兼容正则表达式的语法进行规则的匹配. 
+ 如果需要Nginx的Rewriter功能,在编译Nginx之前,需要安装PCRE库 + URL,统一资源定位符 + URI,通用资源标识符 + rewriter 可以出现的地方 + * location + * server + 常用命令 + if(条件){} + set $变量 + return 500 + beak 跳出 rewrite + rewrite 重写 + + + +# Nginx-Rewriter语法 + + + 变量名 + * 变量名可以用 "=" 或者 "!=" 运算符 + ~ + 表示区分大小写字母匹配 + !~ + 跟楼上相反 + ~* + 表示不区分大小写字母匹配 + !~* + 跟楼上相反 + -f !-f + 文集是否存在 + -d !-d + 目录是否存在 + -e !-e + 判断文件或者目录,是否存在 + -x !-x + 判断文件是否可执行 + + # 支持 $1 - $9 位置化参数 + + + + + +# Nginx-Return指令 + + + + 示例:如果以".sh",".bash"结尾,则返回状态码403 + + location ~.*\.(sh|bash)?${ + return 403; + } + + + + + +# Nginx-set,rewrite指令 + + + + set $var '1'; + rewrite指令的最后一个参数为flag标记,支持的flag标记主要有以下几种 + + last :相当于Apache的[L]标记,表示完成rewrite + break :本条规则匹配完成后,终止匹配,不再匹配后面的规则 + redirect:返回302重定向,浏览器地址会显示跳转后的URL地址 + permanent:返回永久301重定向,浏览器地址会显示跳转后的URL地址 + + last和break用来实现URI重写,浏览器地址的URL不变 + redirect和permanent用来实现URL跳转,浏览器的地址会显示跳转后的URL地址 + + 一般在 location 中,或直接在server标签中编写 rewrite规则,推荐使用last标记 + 在非location中,则使用break标记 + + URL rewriter 和反向代理同时进行 + + nginx rewrite指令执行顺序: + 1.执行server块的rewrite指令(这里的块指的是server关键字后{}包围的区域,其它xx块类似) + 2.执行location匹配 + 3.执行选定的location中的rewrite指令 + 如果其中某步URI被重写,则重新循环执行1-3,直到找到真实存在的文件 + + 如果循环超过10次,则返回500 Internal Server Error错误 + + break指令 + 语法:break; + 默认值:无 + 作用域:server,location,if + + 停止执行当前虚拟主机的后续rewrite指令集 + + + +# Nginx-if指令 + + + +> 这个是没 else 的,只有 if + + if ($http_user_agent ~ MSIE) + { + rewrite ^(.*)$/msie/$1 break; + } + if (!-f $request_filename) + { + rewrite ^/img/(.*) /site/$host/images/$1 last; + } diff --git a/php/README.md b/php/README.md new file mode 100644 index 0000000..a8a3764 --- /dev/null +++ b/php/README.md @@ -0,0 +1,5 @@ +# 你好,世界!![image](https://github.com/xx13295/wxm/blob/master/images/o.png?raw=true) + + \ No newline at end of file diff --git a/protoBuf/README.md b/protoBuf/README.md new file mode 100644 index 0000000..284202d --- /dev/null +++ b/protoBuf/README.md @@ -0,0 +1,79 @@ +# ProtoBuf + + protocol buffers 是一种语言无关支持 Java、C++、Python 等多种语言、 + 平台无关、可扩展的序列化结构数据的方法它可用于(数据)通信协议、数据存储等。 + + 
Protocol Buffers 是一种灵活,高效,自动化机制的结构数据序列化方法 + -可类比 XML,但是比 XML 更小(3 ~ 10倍)、更快(20 ~ 100倍)、更为简单。 + + 你可以定义数据的结构,然后使用特殊生成的源代码轻松的在各种数据流中 + 使用各种语言进行编写和读取结构数据。你甚至可以更新数据结构, + 而不破坏由旧数据结构编译的已部署程序。 + + +### 安装环境win + + 主要用于编译 .proto 文件为 相应的语言文件 + +下载地址 +>https://github.com/protocolbuffers/protobuf/releases + +这里我选择protoc-3.15.8-win64 +>https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-win64.zip + +解压后 在环境变量 path中加入D:\protoc-3.15.8-win64\bin; + +打开cmd输入 protoc --version 可以看到相应的版本信息说明安装成功 + +### 使用 + +创建 Message.proto文件 以下为例子 + +```aidl + + +syntax = "proto3"; //protobuf语法有 proto2和proto3两种,这里指定 proto3 + +option java_package = "plus.ojbk.protocol.protobuf"; +option java_outer_classname = "Message"; + +message Message { + string requestId = 1; + CommandType cmd = 2; + string content = 3; + enum CommandType { + NORMAL = 0; //常规业务消息 + HEARTBEAT_REQUEST = 1; //客户端心跳消息 + HEARTBEAT_RESPONSE = 2; //服务端心跳消息 + } +} + + +``` + +我们在上例中定义了一个名为 Message 的 消息,语法很简单,message 关键字后跟上消息名称: + +```aidl + +message xxx { + +} + +``` + +在.proto文件的目录下 打开cmd 输入 protoc Message.proto --java_out=./ 敲回车 + + +Message.proto 为你的.proto文件 +--java_out=“你要输出的位置” +./ 为当前目录 + + +就会自动生成需要的 java类 + +其他语言请自行查阅文档 + + +参考资料: + +>https://colobu.com/2017/03/16/Protobuf3-language-guide/ \ No newline at end of file diff --git a/rabbiteMQ/README.md b/rabbiteMQ/README.md new file mode 100644 index 0000000..ca4a698 --- /dev/null +++ b/rabbiteMQ/README.md @@ -0,0 +1,79 @@ +### 1. 
下载Erlang的rpm包 + + RabbitMQ是Erlang语言编写,所以Erang环境必须要有, + 注:Erlang环境一定要与RabbitMQ版本匹配 + +'https://www.rabbitmq.com/which-erlang.html' + + Erlang下载地址:(https://www.rabbitmq.com/releases/erlang/) + +'https://www.rabbitmq.com/releases/erlang/erlang-18.3.4.4-1.el7.centos.x86_64.rpm' + +### 2.下载RabbitMQ的rpm包 + + RabbitMQ下载地址:(https://www.rabbitmq.com/releases/rabbitmq-server/) + +'https://www.rabbitmq.com/releases/rabbitmq-server/v3.6.5/rabbitmq-server-3.6.5-1.noarch.rpm' + +### 3.下载socat的rpm包 + + rabbitmq安装依赖于socat,所以需要下载socat。 + + socat下载地址:(http://repo.iotti.biz/CentOS/) + +'http://repo.iotti.biz/CentOS/6/x86_64/socat-1.7.3.2-1.el6.lux.x86_64.rpm' + + + +### 4.分别安装Erlang、Socat、RabbitMQ + + 要按照顺序安装否则容易 安装失败。 + +>erlang-18.3.4-1.el7.centos.x86_64.rpm + +>rpm -ivh socat-1.7.3.2-1.el6.lux.x86_64.rpm + +>rpm -ivh rabbitmq-server-3.6.5-1.noarch.rpm + +### 5.修改 rabbitmq 配置 + +>vi /usr/lib/rabbitmq/lib/rabbitmq_server-3.6.5/ebin/rabbit.app + + 找到 关键词 loopback_users 将里面 的<<"guest">> 删除 。 + 变为 {loopback_users, []},然后重启服务 + + 这么做的目的是 可以让guest 账号远程登录, 如果不需要 可不操作。 + +### 6.安装管理插件: + +>rabbitmq-plugins enable rabbitmq_management + + + 启动RabbitMQ + +>cd /usr/lib/rabbitmq/bin + +>./rabbitmq-server start + + 浏览器访问: + +'http://XXX.XXX.XXX.XXX:15672/' + + 可以看到RabbitMQ的管理界面。 + + 用户密码管理的操作我们都可以在管理页面中设置。 + +### 7.默认端口 + + client端 端口: 5672 + 管理界面 端口: 15672 + server间内部通信 端口:25672 + erlang发现 端口:4369 + ##https://github.com/rabbitmq/rabbitmq-server/blob/master/docs/rabbitmq.conf.example + + +>cp /usr/share/doc/rabbitmq-server-3.6.5/rabbitmq.config.example /etc/rabbitmq/ + +>mv rabbitmq.config.example rabbitmq.config + +>vi /etc/rabbitmq/rabbitmq.config diff --git a/reCAPTCHA/README.md b/reCAPTCHA/README.md new file mode 100644 index 0000000..a04c217 --- /dev/null +++ b/reCAPTCHA/README.md @@ -0,0 +1,160 @@ +# 谷歌验证码![image](https://github.com/xx13295/wxm/blob/master/images/o.png?raw=true) + + + 我想大家肯定都见过这样的验证码挺烦的 还得疯狂选图片才能继续下一步。 + 
+![image](https://github.com/xx13295/MD-Note/blob/master/reCAPTCHA/img/yanzhengma-1.png?raw=true) + +![image](https://github.com/xx13295/MD-Note/blob/master/reCAPTCHA/img/yanzhengma-2.png?raw=true) + + + 这是谷歌的reCAPTCHA v2 验证码 + 现在已经有 V3 了 V3对我们来说是无感的优于V2 + + + +### 文档资料 + + + https://developers.google.com/recaptcha/docs/display + + 由于防火墙的原因 谷歌被墙了 + + 因此下述的代码示例中使用的 是 https://www.recaptcha.net + + 原版地址为 https://www.google.com + + 后缀均相同 + + + +#### 申请验证码 + + 首先要登录谷歌账户就不用说了 + + https://www.google.com/recaptcha/admin/create + +![image](https://github.com/xx13295/MD-Note/blob/master/reCAPTCHA/img/yanzhengma-3.png?raw=true) + +![image](https://github.com/xx13295/MD-Note/blob/master/reCAPTCHA/img/yanzhengma-4.png?raw=true) + + 2个密钥,一个是在客户端(HTML)使用,一个是在服务端使用 + +#### 前端代码 + +例子1. + +``` + + + + + reCAPTCHA demo + + + +
+ + + + + + +``` + 例子2. + +``` + + + + + 谷歌ReCaptcha + + + +
+ +

验证结果

+
+
+	
+	            
+
+ + + + +``` +#### 服务端代码 + +``` + + @Value("${google.recaptcha.validate-api:https://www.recaptcha.net/recaptcha/api/siteverify}") + private String validateApi; + @Value("${google.recaptcha.server-secret:6L**********tY**V****YG}") + private String captchaServerSecret; + + @RequestMapping("/validate") + public Object validate (HttpServletRequest request, @RequestParam("token")String token) { + RestTemplate restTemplate= new RestTemplate(); + HttpHeaders httpHeaders = new HttpHeaders(); + httpHeaders.set(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_FORM_URLENCODED_VALUE); + MultiValueMap requestBody = new LinkedMultiValueMap<>(); + requestBody.add("secret", this.captchaServerSecret); + requestBody.add("response", token); + requestBody.add("remoteip", request.getRemoteAddr()); // 客户的ip地址,不是必须的参数。 + ResponseEntity responseEntity = restTemplate.postForEntity(this.validateApi, new HttpEntity<>(requestBody,httpHeaders), JSONObject.class); + return responseEntity.getBody(); + } + +``` + diff --git a/reCAPTCHA/img/yanzhengma-1.png b/reCAPTCHA/img/yanzhengma-1.png new file mode 100644 index 0000000..0774498 Binary files /dev/null and b/reCAPTCHA/img/yanzhengma-1.png differ diff --git a/reCAPTCHA/img/yanzhengma-2.png b/reCAPTCHA/img/yanzhengma-2.png new file mode 100644 index 0000000..0793e89 Binary files /dev/null and b/reCAPTCHA/img/yanzhengma-2.png differ diff --git a/reCAPTCHA/img/yanzhengma-3.png b/reCAPTCHA/img/yanzhengma-3.png new file mode 100644 index 0000000..b190f2c Binary files /dev/null and b/reCAPTCHA/img/yanzhengma-3.png differ diff --git a/reCAPTCHA/img/yanzhengma-4.png b/reCAPTCHA/img/yanzhengma-4.png new file mode 100644 index 0000000..4552740 Binary files /dev/null and b/reCAPTCHA/img/yanzhengma-4.png differ diff --git a/redis/README.md b/redis/README.md new file mode 100644 index 0000000..c9c95ac --- /dev/null +++ b/redis/README.md @@ -0,0 +1,70 @@ +# redis安装 + +### 2022年 已全面拥抱docker 请移步docker教程查阅 + + 
https://github.com/xx13295/MD-Note/blob/master/docker/docker-redis.md + +### 下载redis + +>wget http://download.redis.io/releases/redis-4.0.8.tar.gz + +### 解压 + +>tar xzvf redis-4.0.8.tar.gz + +### 安装 + +>cd redis-4.0.8 + +>make + +>cd src + +>make install PREFIX=/usr/local/redis + +### 移动配置文件到安装目录下 + +>cd ../ + +>mkdir /usr/local/redis/etc + +>mv redis.conf /usr/local/redis/etc + +### 配置redis为后台启动 + +>vi /usr/local/redis/etc/redis.conf + + +### 将redis加入到开机启动 + +>vi /etc/rc.local + + 在里面添加内容:(开启开机自启动) + +>/usr/local/redis/bin/redis-server /usr/local/redis/etc/redis.conf + +### 开启redis + +>/usr/local/redis/bin/redis-server /usr/local/redis/etc/redis.conf + +### 常用命令   + + 启动redis + +>redis-server /usr/local/redis/etc/redis.conf + + 停止redis + +>pkill redis + +### 卸载redis: + + 删除安装目录 + +>rm -rf /usr/local/redis + + 删除所有redis相关命令脚本 + +>rm -rf /usr/bin/redis-* + + diff --git a/redis/redis.conf.6.2.6 b/redis/redis.conf.6.2.6 new file mode 100644 index 0000000..cefb5ed --- /dev/null +++ b/redis/redis.conf.6.2.6 @@ -0,0 +1,2052 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. 
+# +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. +# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all available network interfaces on the host machine. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# Each address can be prefixed by "-", which means that redis will not fail to +# start if the address is not available. Being not available only refers to +# addresses that does not correspond to any network interfece. Addresses that +# are already in use will always fail, and unsupported protocols will always BE +# silently skipped. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses +# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6 +# bind * -::* # like the default, all available interfaces +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. 
So by default we uncomment the +# following bind directive, that will force Redis to listen only on the +# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis +# will only be able to accept client connections from the same host that it is +# running on). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT OUT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +#bind 127.0.0.1 -::1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode no + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. 
There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /run/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-key-file-pass secret + +# Normally Redis uses the same certificate for both server functions (accepting +# connections) and client functions (replicating from a master, establishing +# cluster bus connections, etc.). +# +# Sometimes certificates are issued with attributes that designate them as +# client-only or server-only certificates. In that case it may be desired to use +# different certificates for incoming (server) and outgoing (client) +# connections. 
To do that, use the following directives: +# +# tls-client-cert-file client.crt +# tls-client-key-file client.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-client-key-file-pass secret + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended +# that older formally deprecated versions are kept disabled to reduce the attack surface. +# You can explicitly specify TLS versions to support. +# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2", +# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination. +# To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. 
+# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +# When Redis is supervised by upstart or systemd, this parameter has no impact. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# on startup, and updating Redis status on a regular +# basis. +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." 
+# They do not enable continuous pings back to your supervisor. +# +# The default is "no". To run under upstart/systemd, you can simply uncomment +# the line below: +# +# supervised auto + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +# +# Note that on modern Linux systems "/run/redis.pid" is more conforming +# and should be used instead. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "/etc/redis/redis.log" +#logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 
+# syslog-facility local0 + +# To disable the built in crash log, which will possibly produce cleaner core +# dumps when they are needed, uncomment the following: +# +# crash-log-enabled no + +# To disable the fast memory check that's run as part of the crash log, which +# will possibly let redis terminate sooner, uncomment the following: +# +# crash-memcheck-enabled no + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY and syslog logging is +# disabled. Basically this means that normally a logo is displayed only in +# interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show a +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo no + +# By default, Redis modifies the process title (as seen in 'top' and 'ps') to +# provide some runtime information. It is possible to disable this and leave +# the process name as executed by setting the following to no. +set-proc-title yes + +# When changing the process title, Redis uses the following template to construct +# the modified title. +# +# Template variables are specified in curly brackets. The following variables are +# supported: +# +# {title} Name of process as executed if parent, or type of child process. +# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or +# Unix socket if only that's available. +# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]". +# {port} TCP port listening on, or 0. +# {tls-port} TLS port listening on, or 0. +# {unixsocket} Unix domain socket listening on, or "". +# {config-file} Name of configuration file used. 
+# +proc-title-template "{title} {listen-addr} {server-mode}" + +################################ SNAPSHOTTING ################################ + +# Save the DB to disk. +# +# save +# +# Redis will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# Snapshotting can be completely disabled with a single empty string argument +# as in following example: +# +# save "" +# +# Unless specified otherwise, by default Redis will save the DB: +# * After 3600 seconds (an hour) if at least 1 key changed +# * After 300 seconds (5 minutes) if at least 100 keys changed +# * After 60 seconds if at least 10000 keys changed +# +# You can set these explicitly by uncommenting the three following lines. +# +# save 3600 1 +# save 300 100 +# save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# By default compression is enabled as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 
+#
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes

+# Enables or disables full sanitation checks for ziplist and listpack etc when
+# loading an RDB or RESTORE payload. This reduces the chances of an assertion or
+# crash later on while processing commands.
+# Options:
+# no - Never perform full sanitation
+# yes - Always perform full sanitation
+# clients - Perform full sanitation only for user connections.
+# Excludes: RDB files, RESTORE commands received from the master
+# connection, and client connections which have the
+# skip-sanitize-payload ACL flag.
+# The default should be 'clients' but since it currently affects cluster
+# resharding via MIGRATE, it is temporarily set to 'no' by default.
+#
+# sanitize-dump-payload no

+# The filename where to dump the DB
+dbfilename dump.rdb

+# Remove RDB files used by replication in instances without persistence
+# enabled. By default this option is disabled, however there are environments
+# where for regulations or other security concerns, RDB files persisted on
+# disk by masters in order to feed replicas, or stored on disk by replicas
+# in order to load them for the initial synchronization, should be deleted
+# ASAP. Note that this option ONLY WORKS in instances that have both AOF
+# and RDB persistence disabled, otherwise is completely ignored.
+#
+# An alternative (and sometimes better) way to obtain the same effect is
+# to use diskless replication on both master and replicas instances. However
+# in the case of replicas, diskless is not always an option.
+rdb-del-sync-files no

+# The working directory. 
+# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth +# +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. 
In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH . + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all commands except: +# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. 
+# +# New replicas and reconnecting replicas that are not able to continue the +# replication process just receiving differences, need to do what is called a +# "full synchronization". An RDB file is transmitted from the master to the +# replicas. +# +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. 
+repl-diskless-sync-delay 5

+# -----------------------------------------------------------------------------
+# WARNING: RDB diskless load is experimental. Since in this setup the replica
+# does not immediately store an RDB on disk, it may cause data loss during
+# failovers. RDB diskless load + Redis modules not handling I/O reads may also
+# cause Redis to abort in case of I/O errors during the initial synchronization
+# stage with the master. Use only if you know what you are doing.
+# -----------------------------------------------------------------------------
+#
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and slave buffers).
+# However, parsing the RDB file directly from the socket may mean that we have
+# to flush the contents of the current database before the full rdb was
+# received. For this reason we have the following options:
+#
+# "disabled" - Don't use diskless load (store the rdb file to the disk first)
+# "on-empty-db" - Use diskless load only when it is completely safe.
+# "swapdb" - Keep a copy of the current db contents in RAM while parsing
+# the data directly from the socket. note that this requires
+# sufficient memory, if you don't have it, you risk an OOM kill.
+repl-diskless-load disabled

+# Replicas send PINGs to server in a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+#
+# repl-ping-replica-period 10

+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
+# 2) Master timeout from the point of view of replicas (data, pings). 
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. 
+# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# ----------------------------------------------------------------------------- +# By default, Redis Sentinel includes all replicas in its reports. A replica +# can be excluded from Redis Sentinel's announcements. An unannounced replica +# will be ignored by the 'sentinel replicas ' command and won't be +# exposed to Redis Sentinel's clients. +# +# This option does not change the behavior of replica-priority. Even with +# replica-announced set to 'no', the replica can be promoted to master. To +# prevent this behavior, set replica-priority to 0. +# +# replica-announced yes + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. 
+# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. 
+# This is implemented using an invalidation table that remembers, using +# a radix key indexed by key name, what clients have which keys. In turn +# this is used in order to send invalidation messages to clients. Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values. Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. +# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 + +################################## SECURITY ################################### + +# Warning: since Redis is pretty fast, an outside user can try up to +# 1 million passwords per second against a modern box. 
This means that you +# should use very strong passwords, otherwise they will be very easy to break. +# Note that because the password is really a shared secret between the client +# and the server, and should not be memorized by any human, the password +# can be easily a long string from /dev/urandom or whatever, so by using a +# long and unguessable password no brute force attack will be possible. + +# Redis ACL users are defined in the following format: +# +# user ... acl rules ... +# +# For example: +# +# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 +# +# The special username "default" is used for new connections. If this user +# has the "nopass" rule, then new connections will be immediately authenticated +# as the "default" user without the need of any password provided via the +# AUTH command. Otherwise if the "default" user is not flagged with "nopass" +# the connections will start in not authenticated state, and will require +# AUTH (or the HELLO command AUTH option) in order to be authenticated and +# start to work. +# +# The ACL rules that describe what a user can do are the following: +# +# on Enable the user: it is possible to authenticate as this user. +# off Disable the user: it's no longer possible to authenticate +# with this user, however the already authenticated connections +# will still work. +# skip-sanitize-payload RESTORE dump-payload sanitation is skipped. +# sanitize-payload RESTORE dump-payload is sanitized (default). +# + Allow the execution of that command +# - Disallow the execution of that command +# +@ Allow the execution of all the commands in such category +# with valid categories are like @admin, @set, @sortedset, ... +# and so forth, see the full list in the server.c file where +# the Redis command table is described and defined. +# The special category @all means all the commands, but currently +# present in the server, and that will be loaded in the future +# via modules. 
+# +|subcommand Allow a specific subcommand of an otherwise +# disabled command. Note that this form is not +# allowed as negative like -DEBUG|SEGFAULT, but +# only additive starting with "+". +# allcommands Alias for +@all. Note that it implies the ability to execute +# all the future commands loaded via the modules system. +# nocommands Alias for -@all. +# ~ Add a pattern of keys that can be mentioned as part of +# commands. For instance ~* allows all the keys. The pattern +# is a glob-style pattern like the one of KEYS. +# It is possible to specify multiple patterns. +# allkeys Alias for ~* +# resetkeys Flush the list of allowed keys patterns. +# & Add a glob-style pattern of Pub/Sub channels that can be +# accessed by the user. It is possible to specify multiple channel +# patterns. +# allchannels Alias for &* +# resetchannels Flush the list of allowed channel patterns. +# > Add this password to the list of valid password for the user. +# For example >mypass will add "mypass" to the list. +# This directive clears the "nopass" flag (see later). +# < Remove this password from the list of valid passwords. +# nopass All the set passwords of the user are removed, and the user +# is flagged as requiring no password: it means that every +# password will work against this user. If this directive is +# used for the default user, every new connection will be +# immediately authenticated with the default user without +# any explicit AUTH command required. Note that the "resetpass" +# directive will clear this condition. +# resetpass Flush the list of allowed passwords. Moreover removes the +# "nopass" status. After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, off, +# -@all. The user returns to the same state it has immediately +# after its creation. 
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+# user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+# user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice had yet no commands in the set of allowed
+# commands, later all the commands are added, so the user will be able to
+# execute everything.
+#
+# Basically ACL rules are processed left-to-right.
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
+# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# NOTE: kept commented out because "requirepass" is configured below and
+# would be ignored if an external ACL file were active (see the note that
+# follows); enabling both is a misconfiguration.
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system.
The option effect will be just setting
+# the password for the default user. Clients will still authenticate using
+# AUTH <password> as usual, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
+#
+# The requirepass is not compatible with the aclfile option and the ACL LOAD
+# command, these will cause requirepass to be ignored.
+#
+requirepass docker.mima.redis
+
+# New users are initialized with restrictive permissions by default, via the
+# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
+# is possible to manage access to Pub/Sub channels with ACL rules as well. The
+# default Pub/Sub channels permission for new users is controlled by the
+# acl-pubsub-default configuration directive, which accepts one of these values:
+#
+# allchannels: grants access to all Pub/Sub channels
+# resetchannels: revokes access to all Pub/Sub channels
+#
+# To ensure backward compatibility while upgrading Redis 6.0, acl-pubsub-default
+# defaults to the 'allchannels' permission.
+#
+# Future compatibility note: it is very likely that in a future version of Redis
+# the directive's default of 'allchannels' will be changed to 'resetchannels' in
+# order to provide better out-of-the-box Pub/Sub security. Therefore, it is
+# recommended that you explicitly define Pub/Sub permissions for all users
+# rather than rely on implicit default values. Once you've set explicit
+# Pub/Sub for all existing users, you should uncomment the following line.
+#
+# acl-pubsub-default resetchannels
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------ +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). 
+# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select one from the following behaviors: +# +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key having an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. 
+# +# Note: with any of the above policies, when there are no suitable keys for +# eviction, Redis will return an error on write operations that require +# more memory. These are usually commands that create new keys, add data or +# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE, +# SORT (due to the STORE argument), and EXEC (if the transaction includes any +# command that requires memory). +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Eviction processing is designed to function well with the default setting. +# If there is an unusually large amount of write traffic, this value may need to +# be increased. Decreasing this value may reduce latency at the risk of +# eviction processing effectiveness +# 0 = minimum latency, 10 = default, 100 = process without regard to latency +# +# maxmemory-eviction-tenacity 10 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. 
+# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth). So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire key". The key space is slowly and interactively scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and to add latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. 
One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. 
The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives. + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +# It is also possible, for the case when to replace the user code DEL calls +# with UNLINK calls is not easy, to modify the default behavior of the DEL +# command to act exactly like UNLINK, using the following configuration +# directive: + +lazyfree-lazy-user-del no + +# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous +# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the +# commands. When neither flag is passed, this directive will be used to determine +# if the data should be deleted asynchronously. + +lazyfree-lazy-user-flush no + +################################ THREADED I/O ################################# + +# Redis is mostly single threaded, however there are certain threaded +# operations such as UNLINK, slow I/O accesses and other things that are +# performed on side threads. +# +# Now it is also possible to handle Redis clients socket reads and writes +# in different I/O threads. Since especially writing is so slow, normally +# Redis users use pipelining in order to speed up the Redis performances per +# core, and spawn multiple instances in order to scale more. Using I/O +# threads it is possible to easily speedup two times Redis without resorting +# to pipelining nor sharding of the instance. 
+#
+# By default threading is disabled, we suggest enabling it only in machines
+# that have at least 4 or more cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances being able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four core box, try to use 2 or 3 I/O
+# threads, if you have 8 cores, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+# CONFIG SET. Also this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.
+
+############################ KERNEL OOM CONTROL ##############################
+
+# On Linux, it is possible to hint the kernel OOM killer on what processes
+# should be killed first when out of memory.
+#
+# Enabling this feature makes Redis actively control the oom_score_adj value
+# for all its processes, depending on their role.
The default scores will
+# attempt to have background child processes killed before all others, and
+# replicas killed before masters.
+#
+# Redis supports three options:
+#
+# no: Don't make changes to oom-score-adj (default).
+# yes: Alias to "relative" see below.
+# absolute: Values in oom-score-adj-values are written as is to the kernel.
+# relative: Values are used relative to the initial value of oom_score_adj when
+# the server starts and are then clamped to a range of -1000 to 1000.
+# Because typically the initial value is 0, they will often match the
+# absolute values.
+oom-score-adj no
+
+# When oom-score-adj is used, this directive controls the specific values used
+# for master, replica and background child processes. Values range -2000 to
+# 2000 (higher means more likely to be killed).
+#
+# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
+# can freely increase their value, but not decrease it below its initial
+# settings. This means that setting oom-score-adj to "relative" and setting the
+# oom-score-adj-values to positive values will always succeed.
+oom-score-adj-values 0 200 800
+
+
+#################### KERNEL transparent hugepage CONTROL ######################
+
+# Usually the kernel Transparent Huge Pages control is set to "madvise"
+# or "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
+# case this config has no effect. On systems in which it is set to "always",
+# redis will attempt to disable it specifically for the redis process in order
+# to avoid latency problems specifically with fork(2) and CoW.
+# If for some reason you prefer to keep it enabled, you can set this config to
+# "no" and the kernel global to "always".
+
+disable-thp yes
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk.
This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check https://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. 
+# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. 
+# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# When rewriting the AOF file, Redis is able to use an RDB preamble in the +# AOF file for faster rewrites and recoveries. When this option is turned +# on the rewritten AOF file is composed of two different stanzas: +# +# [RDB file][AOF tail] +# +# When loading, Redis recognizes that the AOF file starts with the "REDIS" +# string and loads the prefixed RDB file, then continues loading the AOF +# tail. 
+aof-use-rdb-preamble yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet call any write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### + +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are a multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. 
+# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large cluster-replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the cluster-replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). 
+# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value or +# set cluster-allow-replica-migration to 'no'. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# Turning off this option allows to use less automatic cluster configuration. +# It both disables migration to orphaned masters and migration from masters +# that became empty. +# +# Default is 'yes' (allow automatic migrations). +# +# cluster-allow-replica-migration yes + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least a hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. 
+#
+# However sometimes you want the subset of the cluster which is working,
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to failover its
+# master during master failures. However the replica can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted if not
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while
+# the cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A
+# master outage in a 1 or 2 shard configuration causes a read/write outage to the
+# entire cluster without this option set, with it set there is only a write outage.
+# Without a quorum of masters, slot ownership will not change automatically.
+#
+# cluster-allow-reads-when-down no
+
+# In order to setup your cluster make sure to read the documentation
+# available at https://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster nodes address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following four options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-tls-port +# * cluster-announce-bus-port +# +# Each instructs the node about its address, client ports (for connections +# without and with TLS) and cluster message bus port. The information is then +# published in the header of the bus packets so that other nodes will be able to +# correctly map the address of the node publishing the information. +# +# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set +# to zero, then cluster-announce-port refers to the TLS port. Note also that +# cluster-announce-tls-port has no effect if cluster-tls is set to no. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. +# +# Note that when remapped, the bus port may not be at the fixed offset of +# clients port + 10000, so you can specify any port and bus-port depending +# on how they get remapped. If the bus-port is not set, a fixed offset of +# 10000 will be used as usual. +# +# Example: +# +# cluster-announce-ip 10.1.1.5 +# cluster-announce-tls-port 6379 +# cluster-announce-port 0 +# cluster-announce-bus-port 6380 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). 
+# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. 
+# This feature is documented at https://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# t Stream commands +# d Module key type events +# m Key-miss events (Note: It is not included in the 'A' class) +# A Alias for g$lshzxetd, so that the "AKE" string means all the events +# (Except key-miss events which are excluded from 'A' due to their +# unique nature). +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. 
+notify-keyspace-events ""

+############################### GOPHER SERVER #################################

+# Redis contains an implementation of the Gopher protocol, as specified in
+# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation both server and client side is so simple
+# that the Redis server has just 100 lines of code in order to implement this
+# support.
+#
+# What do you do with Gopher nowadays? Well Gopher never *really* died, and
+# lately there is a movement in order for the Gopher more hierarchical content
+# composed of just plain text documents to be resurrected. Some want a simpler
+# internet, others believe that the mainstream internet became too much
+# controlled, and it's cool to create an alternative space for people that
+# want a bit of fresh air.
+#
+# Anyway for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS? ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kind of inline requests that were anyway illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual as well.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
+# talking), you likely need a script like the following:
+#
+# https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet in a publicly accessible address
+# to serve Gopher pages MAKE SURE TO SET A PASSWORD to the instance.
+# Once a password is set: +# +# 1. The Gopher server (when enabled, not by default) will still serve +# content via Gopher. +# 2. However other commands cannot be called before the client will +# authenticate. +# +# So use the 'requirepass' option to protect your instance. +# +# Note that Gopher is not currently supported when 'io-threads-do-reads' +# is enabled. +# +# To enable Gopher support, uncomment the following line and set the option +# from no (the default) to yes. +# +# gopher-enabled no + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. +# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. 
Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Streams macro node max size / items. 
The stream data structure is a radix +# tree of big nodes that encode multiple items inside. Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entries limit by setting max-bytes to 0 and max-entries to the desired +# value. +stream-node-max-bytes 4096 +stream-node-max-entries 100 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). 
+# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# replica -> replica clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and replica clients, since +# subscribers and replicas receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Client query buffers accumulate new commands. They are limited to a fixed +# amount by default in order to avoid that a protocol desynchronization (for +# instance due to a bug in the client) will lead to unbound memory usage in +# the query buffer. However you can configure it here if you have very special +# needs, such us huge multi/exec requests or alike. 
+# +# client-query-buffer-limit 1gb + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited to 512 mb. However you can change this limit +# here, but must be 1mb or greater +# +# proto-max-bulk-len 512mb + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. +# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporarily raise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. 
+dynamic-hz yes + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. +# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. 
This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A special value of 0 means to +# decay the counter every time it happens to be scanned. +# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# What is active defragmentation? +# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. +# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. 
Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in a "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. 
+ +# Enabled active defragmentation +# activedefrag no + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage, to be used when the lower +# threshold is reached +# active-defrag-cycle-min 1 + +# Maximal effort for defrag in CPU percentage, to be used when the upper +# threshold is reached +# active-defrag-cycle-max 25 + +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + +# Jemalloc background thread for purging will be enabled by default +jemalloc-bg-thread yes + +# It is possible to pin different threads and processes of Redis to specific +# CPUs in your system, in order to maximize the performances of the server. +# This is useful both in order to pin different Redis threads in different +# CPUs, but also in order to make sure that multiple Redis instances running +# in the same host will be pinned to different CPUs. +# +# Normally you can do this using the "taskset" command, however it is also +# possible to this via Redis configuration directly, both in Linux and FreeBSD. +# +# You can pin the server/IO threads, bio threads, aof rewrite child process, and +# the bgsave child process. 
The syntax to specify the cpu list is the same as +# the taskset command: +# +# Set redis server/io threads to cpu affinity 0,2,4,6: +# server_cpulist 0-7:2 +# +# Set bio threads to cpu affinity 1,3: +# bio_cpulist 1,3 +# +# Set aof rewrite child process to cpu affinity 8,9,10,11: +# aof_rewrite_cpulist 8-11 +# +# Set bgsave child process to cpu affinity 1,10,11 +# bgsave_cpulist 1,10-11 + +# In some cases redis will emit warnings and even refuse to start if it detects +# that the system is in bad state, it is possible to suppress these warnings +# by setting the following config which takes a space delimited list of warnings +# to suppress +# +# ignore-warnings ARM64-COW-BUG diff --git a/redis/redis6-acl.md b/redis/redis6-acl.md new file mode 100644 index 0000000..0c60639 --- /dev/null +++ b/redis/redis6-acl.md @@ -0,0 +1,56 @@ +# redis6 acl功能 + +在redis6以前,无法进行用户权限管理,只有一个auth密码验证的功能, + +如果验证码通过那么就是root权限,如果我们想要禁用一些redis指令, + +只能使用rename将原指令名字修改,而在redis6中引入了ACL模块, + +可以定制不同用户的权限,例如: + + 1.用户名和密码 + + 2.可以执行的指令 + + 3.可以操作的key + +查看用户信息 +>acl list + +创建mike用户密码为123456仅增对name开头的键有+xx的权限 +具体可以用 acl cat 查看 (命令的合集就是 +@xx ) +>acl setuser mike on >123456 ~name* +@bitmap +select +get +ttl +scan +set +info +type +ping +expire + +创建jake用户密码为123456 拥有全部权限 +>acl setuser jake on >123456 +@all ~* + +查看用户列表 +>acl users + +查看当前用户 +>acl whoami + +删除用户 +>acl deluser username + +禁用用户 +>acl setuser username off + +热加载 acl配置 +>acl load + +以上的操作都是内存中的 没有持久化因此有acl save +保存到aclfile中。但docker 安装的可能有权限问题? 
+使用手动编辑即可,反正不是常用
+>acl save

+随机返回一个256bit的32字节的伪随机字符串,
+并将其转换为64字节的字母+数字组合字符串
+>acl genpass

+>acl genpass 64


+## 更多

+https://redis.io/commands/acl-cat
diff --git a/ruby/README.md b/ruby/README.md
new file mode 100644
index 0000000..4d4a491
--- /dev/null
+++ b/ruby/README.md
@@ -0,0 +1,109 @@
+# ruby 安装

+    下载地址 :
+    http://www.ruby-lang.org/en/downloads/

+### 源码安装

+    tar -xvzf ruby-2.7.0.tgz

+    cd ruby-2.7.0

+    ./configure

+    make

+    sudo make install

+### 自动安装

+    yum install ruby


+    这种方式 安装的 ruby 的版本很低 是

+    [root@VM-4-2-centos ~]# ruby -v

+    ruby 2.0.0p648 (2015-12-16) [x86_64-linux]


+#### 因此可以升级版本

+    更换阿里云镜像
+    gem sources -a http://mirrors.aliyun.com/rubygems/

+>安装RVM

+    RVM(Ruby Version Manager )是一款管理Ruby版本的命令行工具,可以使用RVM轻松安装

+    管理Ruby版本。RVM包含了Ruby的版本管理和Gem库管理(gemset)

+    gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB


+    curl -sSL https://get.rvm.io | bash -s stable

+    source /etc/profile.d/rvm.sh

+    查看RVM版本信息,如果可以代表安装成功。

+    rvm -v

+    查看 ruby 版本
+    rvm list known

+    我们选一个 2.7 版本

+    rvm install 2.7

+    然后漫长等待


+    ruby -v

+    可以发现ruby版本已经升级到 2.7 了

+    [root@VM-4-2-centos ~]# ruby -v

+    ruby 2.7.0p0 (2019-12-25 revision 647ee6f091) [x86_64-linux]




+### 安装bundler

+    wget https://rubygems.org/downloads/bundler-2.1.4.gem

+    gem install --local bundler-2.1.4.gem


+### 删除

+    //yum remove ruby*


+### 安装pg Gem时找不到libpq-fe.h头文件?
+
+    尝试安装libpq-dev或其他相当的东西

+    对于Ubuntu系统:

+    sudo apt-get install libpq-dev

+    在Red Hat Linux(RHEL)系统上:

+    yum install postgresql-devel

+    对于Mac Homebrew:

+    brew install postgresql

+    对于Mac MacPorts PostgreSQL:

+    gem install pg -- --with-pg-config=/opt/local/lib/postgresql[version number]/bin/pg_config

+    对于OpenSuse:

+    zypper in postgresql-devel


diff --git "a/windows/\345\210\240\351\231\244window\350\277\234\347\250\213\346\241\214\351\235\242\351\223\276\346\216\245.md" "b/windows/\345\210\240\351\231\244window\350\277\234\347\250\213\346\241\214\351\235\242\351\223\276\346\216\245.md"
new file mode 100644
index 0000000..61b0830
--- /dev/null
+++ "b/windows/\345\210\240\351\231\244window\350\277\234\347\250\213\346\241\214\351\235\242\351\223\276\346\216\245.md"
@@ -0,0 +1 @@
+https://answers.microsoft.com/zh-hans/windows/forum/all/answers%e5%88%86%e4%ba%ab%e5%88%a0%e9%99%a4/cfa7982f-acf6-4544-b083-85ff9bf87d2b
diff --git "a/windows/\345\256\211\350\243\205windows \346\234\215\345\212\241.md" "b/windows/\345\256\211\350\243\205windows \346\234\215\345\212\241.md"
new file mode 100644
index 0000000..5e4bbfa
--- /dev/null
+++ "b/windows/\345\256\211\350\243\205windows \346\234\215\345\212\241.md"
@@ -0,0 +1,16 @@
+# 安装window 服务


+安装 demo服务
+>sc create demo start= auto binPath= "D:\demo.exe"


+简介
+> sc description demo "这是一个demo服务"


+>net start demo

+>net stop demo

+>sc delete demo
diff --git "a/windows/\350\277\234\347\250\213\346\241\214\351\235\242/RDPWrap-v1.6.zip" "b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/RDPWrap-v1.6.zip"
new file mode 100644
index 0000000..f4cfab5
Binary files /dev/null and "b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/RDPWrap-v1.6.zip" differ
diff --git "a/windows/\350\277\234\347\250\213\346\241\214\351\235\242/Readme.md" "b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/Readme.md"
new file mode 100644
index 0000000..c0900fb
--- /dev/null
+++ 
"b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/Readme.md" @@ -0,0 +1,16 @@ +# win10 远程桌面允许多用户同时访问 + + 解压 RDPWrap-v1.6.zip + + 运行install.bat + + 然后运行RDPConfig.exe + + 看到Diagnostics后面都是绿色的 + + 并且Listener state是Listening [fully supported] + + 说明 破解成功,已经可以支持多用户远程桌面链接 + + 否则请运行 update.bat 再来一遍 + diff --git "a/windows/\350\277\234\347\250\213\346\241\214\351\235\242/rdp.md" "b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/rdp.md" new file mode 100644 index 0000000..d72dac1 --- /dev/null +++ "b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/rdp.md" @@ -0,0 +1,67 @@ +# + +C:\Program Files\RDP Wrapper + +在 rdpwrap.ini 里面追加。 + +然后重新update + +最好能用vnc 救援 哈哈 万一等下 rdp没启动就连不上了。 + +10.0.17763.1 看Service state [Running] 后面的ver. 获取 + + +可能要用到的命令 +>net stop termservice + +>net start termservice + +``` +[10.0.17763.1] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AF8E4 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=77941 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=4D505 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1322C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=4BD09 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17F45 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=5B02A +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=1ABFC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17763.1-SLInit] +bInitialized.x86 =CD798 +bServerSku.x86 =CD79C +lMaxUserSessions.x86 =CD7A0 +bAppServerAllowed.x86 =CD7A8 +bRemoteConnAllowed.x86=CD7AC +bMultimonAllowed.x86 =CD7B0 +ulMaxDebugSessions.x86=CD7B4 +bFUSEnabled.x86 =CD7B8 + +bInitialized.x64 =ECAB0 +bServerSku.x64 =ECAB4 +lMaxUserSessions.x64 =ECAB8 +bAppServerAllowed.x64 =ECAC0 +bRemoteConnAllowed.x64=ECAC4 +bMultimonAllowed.x64 =ECAC8 +ulMaxDebugSessions.x64=ECACC +bFUSEnabled.x64 =ECAD0 + +``` + + diff --git 
"a/windows/\350\277\234\347\250\213\346\241\214\351\235\242/rdpwrap\346\233\277\346\215\242\345\244\247\346\263\225/rdpwrap.dll" "b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/rdpwrap\346\233\277\346\215\242\345\244\247\346\263\225/rdpwrap.dll" new file mode 100644 index 0000000..2af2dc0 Binary files /dev/null and "b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/rdpwrap\346\233\277\346\215\242\345\244\247\346\263\225/rdpwrap.dll" differ diff --git "a/windows/\350\277\234\347\250\213\346\241\214\351\235\242/rdpwrap\346\233\277\346\215\242\345\244\247\346\263\225/rdpwrap.ini" "b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/rdpwrap\346\233\277\346\215\242\345\244\247\346\263\225/rdpwrap.ini" new file mode 100644 index 0000000..40e1b54 --- /dev/null +++ "b/windows/\350\277\234\347\250\213\346\241\214\351\235\242/rdpwrap\346\233\277\346\215\242\345\244\247\346\263\225/rdpwrap.ini" @@ -0,0 +1,4998 @@ +; RDP Wrapper Library configuration +; Do not modify without special knowledge + +[Main] +Updated=2018-10-10 +LogFile=\rdpwrap.txt +SLPolicyHookNT60=1 +SLPolicyHookNT61=1 + +[SLPolicy] +TerminalServices-RemoteConnectionManager-AllowRemoteConnections=1 +TerminalServices-RemoteConnectionManager-AllowMultipleSessions=1 +TerminalServices-RemoteConnectionManager-AllowAppServerMode=1 +TerminalServices-RemoteConnectionManager-AllowMultimon=1 +TerminalServices-RemoteConnectionManager-MaxUserSessions=0 +TerminalServices-RemoteConnectionManager-ce0ad219-4670-4988-98fb-89b14c2f072b-MaxSessions=0 +TerminalServices-RemoteConnectionManager-45344fe7-00e6-4ac6-9f01-d01fd4ffadfb-MaxSessions=2 +TerminalServices-RDP-7-Advanced-Compression-Allowed=1 +TerminalServices-RemoteConnectionManager-45344fe7-00e6-4ac6-9f01-d01fd4ffadfb-LocalOnly=0 +TerminalServices-RemoteConnectionManager-8dc86f1d-9969-4379-91c1-06fe1dc60575-MaxSessions=1000 +TerminalServices-DeviceRedirection-Licenses-TSEasyPrintAllowed=1 
+TerminalServices-DeviceRedirection-Licenses-PnpRedirectionAllowed=1 +TerminalServices-DeviceRedirection-Licenses-TSMFPluginAllowed=1 +TerminalServices-RemoteConnectionManager-UiEffects-DWMRemotingAllowed=1 + +[PatchCodes] +nop=90 +Zero=00 +jmpshort=EB +nopjmp=90E9 +CDefPolicy_Query_edx_ecx=BA000100008991200300005E90 +CDefPolicy_Query_eax_rcx_jmp=B80001000089813806000090EB +CDefPolicy_Query_eax_esi=B80001000089862003000090 +CDefPolicy_Query_eax_rdi=B80001000089873806000090 +CDefPolicy_Query_eax_ecx=B80001000089812003000090 +CDefPolicy_Query_eax_ecx_jmp=B800010000898120030000EB0E +CDefPolicy_Query_eax_rcx=B80001000089813806000090 +CDefPolicy_Query_edi_rcx=BF0001000089B938060000909090 + +[6.0.6000.16386] +SingleUserPatch.x86=1 +SingleUserOffset.x86=160BF +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=65E3E +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=15CD8 +DefPolicyCode.x86=CDefPolicy_Query_edx_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=5C88F +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx_jmp + +[6.0.6001.18000] +SingleUserPatch.x86=1 +SingleUserOffset.x86=185E4 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=70DBA +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=17FD8 +DefPolicyCode.x86=CDefPolicy_Query_edx_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=65BD7 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx_jmp + +[6.0.6002.18005] +SingleUserPatch.x86=1 +SingleUserOffset.x86=17FA8 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=70FF6 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=179C0 +DefPolicyCode.x86=CDefPolicy_Query_edx_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=65E83 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx_jmp + +[6.0.6002.19214] +SingleUserPatch.x86=1 +SingleUserOffset.x86=17FC4 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=712AA +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=179B8 
+DefPolicyCode.x86=CDefPolicy_Query_edx_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=65FF7 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx_jmp + +[6.0.6002.23521] +SingleUserPatch.x86=1 +SingleUserOffset.x86=17FB4 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=71EAA +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=179CC +DefPolicyCode.x86=CDefPolicy_Query_edx_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=669CB +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx_jmp + +[6.1.7600.16385] +SingleUserPatch.x86=1 +SingleUserOffset.x86=19E25 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=17D96 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=196F3 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17AD2 +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7600.20890] +SingleUserPatch.x86=1 +SingleUserOffset.x86=19E2D +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=17DF2 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=196FB +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17B0E +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7600.21316] +SingleUserPatch.x86=1 +SingleUserOffset.x86=19E2D +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=17E3E +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=196FB +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17B5E +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.17514] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1A49D +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=180E2 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19D53 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17D8A +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.18540] +SingleUserPatch.x86=1 
+SingleUserOffset.x86=1A4E5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=18006 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19D9F +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17C82 +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.18637] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1A4DD +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=180FA +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19DBB +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17DC6 +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.21650] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1A49D +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=180BE +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19D53 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17D5A +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.21866] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1A49D +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=180BE +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19D53 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17D5A +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.22104] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1A49D +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=180C6 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19D53 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17D5E +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.22750] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1A655 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=17E8E +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19E21 
+DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17C92 +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.22843] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1A655 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=17F96 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19E25 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17D6E +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.23403] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1A65D +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=17F62 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19E29 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17CE2 +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.1.7601.24234] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1A675 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=17F56 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=19E41 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17D2E +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi + +[6.2.8102.0] +SingleUserPatch.x86=1 +SingleUserOffset.x86=F7E9 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=D840 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=E47C +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=D3E6 +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi +SLPolicyInternal.x86=1 +SLPolicyOffset.x86=1B909 +SLPolicyFunc.x86=New_Win8SL +SLPolicyInternal.x64=1 +SLPolicyOffset.x64=1A484 +SLPolicyFunc.x64=New_Win8SL + +[6.2.8250.0] +SingleUserPatch.x86=1 +SingleUserOffset.x86=159C9 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=11E74 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=13520 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 
+DefPolicyOffset.x64=1187A +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi +SLPolicyInternal.x86=1 +SLPolicyOffset.x86=1A0A9 +SLPolicyFunc.x86=New_Win8SL_CP +SLPolicyInternal.x64=1 +SLPolicyOffset.x64=18FAC +SLPolicyFunc.x64=New_Win8SL + +[6.2.8400.0] +SingleUserPatch.x86=1 +SingleUserOffset.x86=15482 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=20824 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=13E48 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1F102 +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi +SLPolicyInternal.x86=1 +SLPolicyOffset.x86=19629 +SLPolicyFunc.x86=New_Win8SL +SLPolicyInternal.x64=1 +SLPolicyOffset.x64=2492C +SLPolicyFunc.x64=New_Win8SL + +[6.2.9200.16384] +SingleUserPatch.x86=1 +SingleUserOffset.x86=15552 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=2BAA8 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=13F08 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=2A31A +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi +SLPolicyInternal.x86=1 +SLPolicyOffset.x86=19559 +SLPolicyFunc.x86=New_Win8SL +SLPolicyInternal.x64=1 +SLPolicyOffset.x64=21FA8 +SLPolicyFunc.x64=New_Win8SL + +[6.2.9200.17048] +SingleUserPatch.x86=1 +SingleUserOffset.x86=20592 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=20948 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=1F408 +DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1F206 +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi +SLPolicyInternal.x86=1 +SLPolicyOffset.x86=17059 +SLPolicyFunc.x86=New_Win8SL +SLPolicyInternal.x64=1 +SLPolicyOffset.x64=24570 +SLPolicyFunc.x64=New_Win8SL + +[6.2.9200.21166] +SingleUserPatch.x86=1 +SingleUserOffset.x86=1557A +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=2BAF8 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=13F30 
+DefPolicyCode.x86=CDefPolicy_Query_eax_esi +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=2A3B6 +DefPolicyCode.x64=CDefPolicy_Query_eax_rdi +SLPolicyInternal.x86=1 +SLPolicyOffset.x86=19581 +SLPolicyFunc.x86=New_Win8SL +SLPolicyInternal.x64=1 +SLPolicyOffset.x64=21FD0 +SLPolicyFunc.x64=New_Win8SL + +[6.3.9431.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=8A611 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=9F721 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=306A8 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=367F9 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2EA25 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=350FD +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=196B0 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=2F9C0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.3.9600.16384] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A2729 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=81824 +LocalOnlyCode.x64=nopjmp +SingleUserPatch.x86=1 +SingleUserOffset.x86=18028 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=20241 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=16115 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=57829 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=1CEB0 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=554C0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.3.9600.17095] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A36D1 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=B9159 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=36BA9 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=21829 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 
+DefPolicyOffset.x86=37529 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1F6A1 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=117F1 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=3B110 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.3.9600.17415] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=B33F8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8B2D9 +LocalOnlyCode.x64=nopjmp +SingleUserPatch.x86=1 +SingleUserOffset.x86=37115 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=33CE9 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3CFF9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=45825 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=18478 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=5DBC0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.3.9600.18692] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=B3458 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8B2E9 +LocalOnlyCode.x64=nopjmp +SingleUserPatch.x86=1 +SingleUserOffset.x86=37105 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=37039 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3CFE9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=45835 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=18488 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=5DBD0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.3.9600.18708] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=B35D8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8B376 +LocalOnlyCode.x64=nopjmp +SingleUserPatch.x86=1 +SingleUserOffset.x86=370F5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=36FE9 +SingleUserCode.x64=Zero 
+DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3CFD9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=457D5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=18308 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=5DB70 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.3.9600.18928] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=B39D8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8B25D +LocalOnlyCode.x64=nopjmp +SingleUserPatch.x86=1 +SingleUserOffset.x86=37D25 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=36C09 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3D6F9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=45495 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=18328 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=5D830 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.3.9600.19093] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=B3958 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8AE4E +LocalOnlyCode.x64=nopjmp +SingleUserPatch.x86=1 +SingleUserOffset.x86=3F045 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=36BC9 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3D899 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=45305 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=18288 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=5D660 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.4.9841.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=956A8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=81141 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=30125 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=12159 
+SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3B989 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=C125 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46A68 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=1EA50 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.4.9860.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=962C8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=81091 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=30845 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=11AA9 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3BEC9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=B9F5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46F18 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=1EB00 +SLInitFunc.x64=New_CSLQuery_Initialize + +[6.4.9879.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A9CC8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=95611 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=30C55 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=16A34 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2DAB9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1BDC5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=41132 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=24750 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.9926.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8C28 +LocalOnlyCode.x86=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=31725 +SingleUserCode.x86=nop +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3CF99 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +SLInitHook.x86=1 
+SLInitOffset.x86=3F140 +SLInitFunc.x86=New_CSLQuery_Initialize +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=95FF1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x64=1 +SingleUserOffset.x64=12A34 +SingleUserCode.x64=Zero +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=BE05 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x64=1 +SLInitOffset.x64=24EC0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.10041.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A9D88 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=97141 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=32215 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=15C64 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2DFC9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=B795 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46960 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22E40 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.10240.16384] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7D38 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=96901 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=32A95 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=18F74 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2F5B9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=22865 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46581 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=250F0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.10586.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7C18 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=96AA1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=353B5 +SingleUserCode.x86=nop 
+SingleUserPatch.x64=1 +SingleUserOffset.x64=190D4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=30B69 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=229A5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=469DE +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=25220 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.10586.589] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7BE8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=96A51 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=353B5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=190D4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=30B69 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=229A5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=469DE +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=25220 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.11082.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7C98 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=96AB1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35405 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=190D4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=30BB9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=229A5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46A3E +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=25220 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.11102.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A5D58 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=95CD1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 
+SingleUserOffset.x86=35A85 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=2A9C4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=30159 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1B5D5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=44FD2 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=D160 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14251.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A5D58 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=95CD1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35A85 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=2A9C4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=30159 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1B5D5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=44FD2 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=D160 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14271.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A4CE8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=941E1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35915 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=263F4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF79 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1C185 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=47725 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CE50 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14279.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A4D28 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=94191 
+LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35915 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=263F4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF79 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1C185 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=47725 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CE50 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14295.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A4D28 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8D691 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35925 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=25514 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1BA35 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=47748 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=C860 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14300.1000] +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F5F1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x64=1 +SingleUserOffset.x64=26B04 +SingleUserCode.x64=Zero +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1D125 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x64=1 +SLInitOffset.x64=CC60 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14316.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E88 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F5F1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=32B55 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=26B04 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3C1C9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 
+DefPolicyOffset.x64=1D295 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46ABD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CC60 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14328.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E88 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F5F1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=32B55 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=26B04 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3C1C9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1D365 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46ABD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CC60 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14332.1001] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E98 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F601 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=357E5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=2AE44 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=316A9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1C025 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4755F +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CAD0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14342.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E98 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8EF31 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=357E5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=26774 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=316A9 
+DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1CEF5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4755F +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CA20 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14352.1002] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A4478 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8D911 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35465 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=24474 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=30099 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1AC05 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=44792 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CDB0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14366.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A9088 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FB01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=34F65 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=21DE4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=316E9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1A855 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4793E +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CCE0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14367.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A9088 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FB01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=34F65 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=21DE4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 
+DefPolicyOffset.x86=316E9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1A855 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4793E +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CCE0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14372.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7698 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F931 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=34635 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=295A4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF69 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1B295 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=460D2 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CC10 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14379.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7698 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F941 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=34635 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=295A4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF69 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1B295 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=460D2 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CC10 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14383.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7698 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F941 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=34635 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=295A4 +SingleUserCode.x64=Zero 
+DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF69 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1B295 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=460D2 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CC10 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14385.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7698 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F941 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=34635 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=295A4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF69 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1B295 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=460D2 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CC10 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14388.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A6038 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8D781 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=359C5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=299A4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF29 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1AFC5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45636 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=C930 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14393.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A6038 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8D781 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=359C5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=299A4 
+SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF29 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1AFC5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45636 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=C930 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14393.1198] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A6088 +LocalOnlyCode.x86=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=359C5 +SingleUserCode.x86=nop +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF29 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +SLInitHook.x86=1 +SLInitOffset.x86=45636 +SLInitFunc.x86=New_CSLQuery_Initialize + +[10.0.14393.1737] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A6198 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8D861 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35AD5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=299A4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=30039 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1AFC5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45724 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=C930 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14393.2457] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A6248 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8D811 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=36CE5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=29CF4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=31209 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1B545 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45824 +SLInitFunc.x86=New_CSLQuery_Initialize 
+SLInitHook.x64=1 +SLInitOffset.x64=C920 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14901.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A6038 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8D781 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=359C5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=299A4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF29 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1AFC5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45636 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=C930 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14905.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A6038 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8D781 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=359C5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=299A4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF29 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1AFC5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45636 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=C930 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14915.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A6D98 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E241 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35E35 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=29EB4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=30399 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1B4A5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46092 
+SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CE40 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14926.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A6D18 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E071 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35E55 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=29EB4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=303B9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1B4A5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=460A2 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=CE40 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14931.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A4908 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8B411 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35705 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=29264 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FF69 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1AD05 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=452FD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=C7FC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14936.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A3F38 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8B9A1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35355 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=25174 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FCD9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1BB55 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx 
+SLInitHook.x86=1 +SLInitOffset.x86=44CFE +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=C62C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14942.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A3F38 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=9115B +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35355 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=199BD +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FCD9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1064E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=44CFE +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=258EC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14946.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A4018 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=911AB +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35355 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=199AD +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FCD9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1064E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=44CFD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=258DC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14951.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A78D8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=94A6B +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=3BA85 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1CEDD +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=32629 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=11E9E 
+DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=3F680 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22EE0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14955.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A78D8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=94A6B +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=3BA85 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1CEDD +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=32629 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=11E9E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=3F680 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22EE0 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14959.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A79B8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=934AB +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=2EF05 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=16A0D +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2A4E9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=10A8E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=448A0 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=26960 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14965.1001] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7868 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=9345B +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=3BA85 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=17DFD +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=32A59 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx 
+DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1212E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=3F680 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=26610 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14971.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7968 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=925FB +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=36FE5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1803D +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3D9A9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=11FBE +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46500 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=26180 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14986.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7878 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=926BB +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=36FA5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=17FFD +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3D979 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=11F7E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=464A0 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=26140 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.14997.1001] +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=931EB +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x64=1 +SingleUserOffset.x64=274ED +SingleUserCode.x64=Zero +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1D95E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x64=1 +SLInitOffset.x64=E000 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15002.1001] +LocalOnlyPatch.x86=1 
+LocalOnlyOffset.x86=A9698 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=931EB +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=346B5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=274ED +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3D779 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1D95E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=47D90 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=E000 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15007.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A9648 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=931EB +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=34665 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=274ED +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3D719 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1D95E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=47D30 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=E000 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15014.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A9648 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=931EB +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=34685 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=274ED +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3D739 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1D95E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=47D50 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=E000 +SLInitFunc.x64=New_CSLQuery_Initialize + 
+[10.0.15019.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A39F8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=928FB +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=BADF5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=FBDD +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=A8479 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx_jmp +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=20AAE +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=3C240 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=24480 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15025.1000] +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=9259B +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x64=1 +SingleUserOffset.x64=2C08D +SingleUserCode.x64=Zero +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1DD0E +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x64=1 +SLInitOffset.x64=E5B8 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15031.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A5BA8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E221 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=30A75 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=2A114 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2B1D9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1C7B5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4532D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=D80C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15042.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A5BA8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E221 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=30A75 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=2A114 
+SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2B1D9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1C7B5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4532D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=D80C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15046.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=80BB8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E361 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=31E95 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=15E14 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=38A19 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=E745 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=9422D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=21FFC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15048.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=80BB8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E361 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=31E95 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=15E14 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=38A19 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=E745 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=9422D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=21FFC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15055.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A5348 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8D2E1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=374C5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 
+SingleUserOffset.x64=181E4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3BAD9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=10B65 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=44EFF +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22AEC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15058.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A5D68 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8CAA1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35075 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=24E74 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FCD9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=2DD65 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4549D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=D1EC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15061.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A5D68 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8CAA1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35075 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=24E74 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FCD9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=2DD65 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4549D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=D1EC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15063.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A5D68 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8CAA1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35075 +SingleUserCode.x86=nop 
+SingleUserPatch.x64=1 +SingleUserOffset.x64=24E74 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FCD9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=2DD65 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4549D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=D1EC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15063.296] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A5D68 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8CAA1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=35075 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=24E74 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=2FCD9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=2DD65 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4549D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=D1EC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15063.994] +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8CB01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x64=1 +SingleUserOffset.x64=15EA4 +SingleUserCode.x64=Zero +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=FAE5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x64=1 +SLInitOffset.x64=234DC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.15063.1155] +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8CB01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x64=1 +SingleUserOffset.x64=15EA4 +SingleUserCode.x64=Zero +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=FAE5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x64=1 +SLInitOffset.x64=234DC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16179.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AA568 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8C141 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 
+SingleUserOffset.x86=34425 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=16F84 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=31219 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1E7F5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45F30 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=21700 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16184.1001] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AA568 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8C141 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=34425 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=16F84 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=31219 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1E7F5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45F30 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=21700 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16199.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=ABA68 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8CED1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=348C5 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=28C14 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=319B9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=CB25 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=469B0 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=FA30 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16215.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7CE8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8DE21 
+LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39F05 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=28724 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3E019 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=CC15 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=46462 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=FB00 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16232.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7D38 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8DD41 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39F35 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=287B4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3E0C9 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=CC15 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4650F +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=FB00 +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16237.1001] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7F38 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E911 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39F85 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1BC84 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3E119 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=DA55 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4655D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=2180C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16241.1001] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A7F38 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 
+LocalOnlyOffset.x64=8E911 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39F85 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1BC84 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3E119 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=DA55 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4655D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=2180C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16251.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=ABC88 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8EC21 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=3A525 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1BCB4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=31779 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=DAF5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=447FD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=2183C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16251.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=ABC88 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8EC21 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=3A525 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1BCB4 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=31779 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=DAF5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=447FD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=2183C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16257.1] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AB718 +LocalOnlyCode.x86=jmpshort 
+LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E841 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=33925 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=11364 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3C409 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1EFD5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4504D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=2495C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16257.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AB718 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E841 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=33925 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=11364 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3C409 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1EFD5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4504D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=2495C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16273.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AB798 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8E871 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=33925 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=11364 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3C409 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=1EFD5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4504D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=2495C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16275.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A9388 
+LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=90001 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39435 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C724 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DE89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D75 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=463D4 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D0C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16278.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A9388 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=90001 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39435 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C724 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DE89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D75 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=463D4 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D0C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16281.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E08 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FD01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39215 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C774 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DC89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D85 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=461BD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D5C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16288.1] +LocalOnlyPatch.x86=1 
+LocalOnlyOffset.x86=A8E08 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FD01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39215 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C774 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DC89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D85 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=461BD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D5C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16291.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E08 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FD01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39215 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C774 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DC89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D85 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=461BD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D5C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16294.1] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E08 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FD01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39215 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C774 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DC89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D85 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=461BD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D5C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16296.0] 
+LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E08 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FD01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39215 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C774 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DC89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D85 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=461BD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D5C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16299.0] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E08 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FD01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39215 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C774 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DC89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D85 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=461BD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D5C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16299.15] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E08 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FD01 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39215 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C774 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DC89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D85 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=461BD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D5C 
+SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16353.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A9388 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=90001 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=39435 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1C724 +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DE89 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D75 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=463D4 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D0C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.16362.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8E38 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FBA1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=2F61C +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=19D1C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DE99 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D85 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=463D4 +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22D9C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17004.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=A8EB8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8FB41 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=2F65C +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=19D1C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=3DF09 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=12D85 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=4643F +SLInitFunc.x86=New_CSLQuery_Initialize 
+SLInitHook.x64=1 +SLInitOffset.x64=22D9C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17017.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AB388 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F291 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=3477C +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1977C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=31049 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=125A5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45CDD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=227DC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17025.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AB498 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F291 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=3477C +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1977C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=31049 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=125A5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45CDD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=227DC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17035.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AB3F8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F271 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=354AC +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=14E7C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=31F19 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=10CB5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45C4D 
+SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22AEC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17046.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AB3F8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=8F281 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=354AC +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=14E8C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=31F19 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=10CC5 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=45C4D +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22AFC +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17063.1000] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AD7F8 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=92671 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=36B0C +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=153CC +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=33569 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=111CE +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=474AD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=2318C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17115.1] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AD738 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=925D1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=36B0C +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1511C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=33569 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=10E78 +DefPolicyCode.x64=CDefPolicy_Query_edi_rcx 
+SLInitHook.x86=1 +SLInitOffset.x86=474AD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22E6C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17128.1] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AD738 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=925D1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=36B0C +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1511C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=33569 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=10E78 +DefPolicyCode.x64=CDefPolicy_Query_edi_rcx +SLInitHook.x86=1 +SLInitOffset.x86=474AD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22E6C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17133.1] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AD738 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=925D1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=36B0C +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1511C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=33569 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=10E78 +DefPolicyCode.x64=CDefPolicy_Query_edi_rcx +SLInitHook.x86=1 +SLInitOffset.x86=474AD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22E6C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17134.1] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AD738 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=925D1 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=36B0C +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1511C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=33569 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=10E78 
+DefPolicyCode.x64=CDefPolicy_Query_edi_rcx +SLInitHook.x86=1 +SLInitOffset.x86=474AD +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=22E6C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17723.1000] +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=75D91 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x64=1 +SingleUserOffset.x64=1296C +SingleUserCode.x64=Zero +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17A45 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x64=1 +SLInitOffset.x64=1B10C +SLInitFunc.x64=New_CSLQuery_Initialize + +[10.0.17763.1] +LocalOnlyPatch.x86=1 +LocalOnlyOffset.x86=AF8E4 +LocalOnlyCode.x86=jmpshort +LocalOnlyPatch.x64=1 +LocalOnlyOffset.x64=77941 +LocalOnlyCode.x64=jmpshort +SingleUserPatch.x86=1 +SingleUserOffset.x86=4D505 +SingleUserCode.x86=nop +SingleUserPatch.x64=1 +SingleUserOffset.x64=1322C +SingleUserCode.x64=Zero +DefPolicyPatch.x86=1 +DefPolicyOffset.x86=4BD09 +DefPolicyCode.x86=CDefPolicy_Query_eax_ecx +DefPolicyPatch.x64=1 +DefPolicyOffset.x64=17F45 +DefPolicyCode.x64=CDefPolicy_Query_eax_rcx +SLInitHook.x86=1 +SLInitOffset.x86=5B02A +SLInitFunc.x86=New_CSLQuery_Initialize +SLInitHook.x64=1 +SLInitOffset.x64=1ABFC +SLInitFunc.x64=New_CSLQuery_Initialize + +[SLInit] +bServerSku=1 +bRemoteConnAllowed=1 +bFUSEnabled=1 +bAppServerAllowed=1 +bMultimonAllowed=1 +lMaxUserSessions=0 +ulMaxDebugSessions=0 +bInitialized=1 + +[6.3.9431.0-SLInit] +bFUSEnabled.x86 =A22A8 +lMaxUserSessions.x86 =A22AC +bAppServerAllowed.x86 =A22B0 +bInitialized.x86 =A22B4 +bMultimonAllowed.x86 =A22B8 +bServerSku.x86 =A22BC +ulMaxDebugSessions.x86=A22C0 +bRemoteConnAllowed.x86=A22C4 + +bFUSEnabled.x64 =C4490 +lMaxUserSessions.x64 =C4494 +bAppServerAllowed.x64 =C4498 +bInitialized.x64 =C449C +bMultimonAllowed.x64 =C44A0 +bServerSku.x64 =C44A4 +ulMaxDebugSessions.x64=C44A8 +bRemoteConnAllowed.x64=C44AC + +[6.3.9600.16384-SLInit] +bFUSEnabled.x86 =C02A8 +lMaxUserSessions.x86 =C02AC +bAppServerAllowed.x86 =C02B0 +bInitialized.x86 =C02B4 
+bMultimonAllowed.x86 =C02B8 +bServerSku.x86 =C02BC +ulMaxDebugSessions.x86=C02C0 +bRemoteConnAllowed.x86=C02C4 + +bServerSku.x64 =E6494 +ulMaxDebugSessions.x64=E6498 +bRemoteConnAllowed.x64=E649C +bFUSEnabled.x64 =E64A0 +lMaxUserSessions.x64 =E64A4 +bAppServerAllowed.x64 =E64A8 +bInitialized.x64 =E64AC +bMultimonAllowed.x64 =E64B0 + +[6.3.9600.17095-SLInit] +bFUSEnabled.x86 =C12A8 +lMaxUserSessions.x86 =C12AC +bAppServerAllowed.x86 =C12B0 +bInitialized.x86 =C12B4 +bMultimonAllowed.x86 =C12B8 +bServerSku.x86 =C12BC +ulMaxDebugSessions.x86=C12C0 +bRemoteConnAllowed.x86=C12C4 + +bServerSku.x64 =E4494 +ulMaxDebugSessions.x64=E4498 +bRemoteConnAllowed.x64=E449C +bFUSEnabled.x64 =E44A0 +lMaxUserSessions.x64 =E44A4 +bAppServerAllowed.x64 =E44A8 +bInitialized.x64 =E44AC +bMultimonAllowed.x64 =E44B0 + +[6.3.9600.17415-SLInit] +bFUSEnabled.x86 =D3068 +lMaxUserSessions.x86 =D306C +bAppServerAllowed.x86 =D3070 +bInitialized.x86 =D3074 +bMultimonAllowed.x86 =D3078 +bServerSku.x86 =D307C +ulMaxDebugSessions.x86=D3080 +bRemoteConnAllowed.x86=D3084 + +bFUSEnabled.x64 =F9054 +lMaxUserSessions.x64 =F9058 +bAppServerAllowed.x64 =F905C +bInitialized.x64 =F9060 +bMultimonAllowed.x64 =F9064 +bServerSku.x64 =F9068 +ulMaxDebugSessions.x64=F906C +bRemoteConnAllowed.x64=F9070 + +[6.3.9600.18692-SLInit] +bFUSEnabled.x86 =D3068 +lMaxUserSessions.x86 =D306C +bAppServerAllowed.x86 =D3070 +bInitialized.x86 =D3074 +bMultimonAllowed.x86 =D3078 +bServerSku.x86 =D307C +ulMaxDebugSessions.x86=D3080 +bRemoteConnAllowed.x86=D3084 + +bFUSEnabled.x64 =F9054 +lMaxUserSessions.x64 =F9058 +bAppServerAllowed.x64 =F905C +bInitialized.x64 =F9060 +bMultimonAllowed.x64 =F9064 +bServerSku.x64 =F9068 +ulMaxDebugSessions.x64=F906C +bRemoteConnAllowed.x64=F9070 + +[6.3.9600.18708-SLInit] +bFUSEnabled.x86 =D3068 +lMaxUserSessions.x86 =D306C +bAppServerAllowed.x86 =D3070 +bInitialized.x86 =D3074 +bMultimonAllowed.x86 =D3078 +bServerSku.x86 =D307C +ulMaxDebugSessions.x86=D3080 +bRemoteConnAllowed.x86=D3084 + 
+bFUSEnabled.x64 =FA054 +lMaxUserSessions.x64 =FA058 +bAppServerAllowed.x64 =FA05C +bInitialized.x64 =FA060 +bMultimonAllowed.x64 =FA064 +bServerSku.x64 =FA068 +ulMaxDebugSessions.x64=FA06C +bRemoteConnAllowed.x64=FA070 + +[6.3.9600.18928-SLInit] +bFUSEnabled.x86 =D3068 +lMaxUserSessions.x86 =D306C +bAppServerAllowed.x86 =D3070 +bInitialized.x86 =D3074 +bMultimonAllowed.x86 =D3078 +bServerSku.x86 =D307C +ulMaxDebugSessions.x86=D3080 +bRemoteConnAllowed.x86=D3084 + +bFUSEnabled.x64 =FA054 +lMaxUserSessions.x64 =FA058 +bAppServerAllowed.x64 =FA05C +bInitialized.x64 =FA060 +bMultimonAllowed.x64 =FA064 +bServerSku.x64 =FA068 +ulMaxDebugSessions.x64=FA06C +bRemoteConnAllowed.x64=FA070 + +[6.3.9600.19093-SLInit] +bFUSEnabled.x86 =D3068 +lMaxUserSessions.x86 =D306C +bAppServerAllowed.x86 =D3070 +bInitialized.x86 =D3074 +bMultimonAllowed.x86 =D3078 +bServerSku.x86 =D307C +ulMaxDebugSessions.x86=D3080 +bRemoteConnAllowed.x86=D3084 + +bFUSEnabled.x64 =FA054 +lMaxUserSessions.x64 =FA058 +bAppServerAllowed.x64 =FA05C +bInitialized.x64 =FA060 +bMultimonAllowed.x64 =FA064 +bServerSku.x64 =FA068 +ulMaxDebugSessions.x64=FA06C +bRemoteConnAllowed.x64=FA070 + +[6.4.9841.0-SLInit] +bFUSEnabled.x86 =BF9F0 +lMaxUserSessions.x86 =BF9F4 +bAppServerAllowed.x86 =BF9F8 +bInitialized.x86 =BF9FC +bMultimonAllowed.x86 =BFA00 +bServerSku.x86 =BFA04 +ulMaxDebugSessions.x86=BFA08 +bRemoteConnAllowed.x86=BFA0C + +bFUSEnabled.x64 =ECFF8 +lMaxUserSessions.x64 =ECFFC +bAppServerAllowed.x64 =ED000 +bInitialized.x64 =ED004 +bMultimonAllowed.x64 =ED008 +bServerSku.x64 =ED00C +ulMaxDebugSessions.x64=ED010 +bRemoteConnAllowed.x64=ED014 + +[6.4.9860.0-SLInit] +bFUSEnabled.x86 =BF7E0 +lMaxUserSessions.x86 =BF7E4 +bAppServerAllowed.x86 =BF7E8 +bInitialized.x86 =BF7EC +bMultimonAllowed.x86 =BF7F0 +bServerSku.x86 =BF7F4 +ulMaxDebugSessions.x86=BF7F8 +bRemoteConnAllowed.x86=BF7FC + +bFUSEnabled.x64 =ECBD8 +lMaxUserSessions.x64 =ECBDC +bAppServerAllowed.x64 =ECBE0 +bInitialized.x64 =ECBE4 +bMultimonAllowed.x64 
=ECBE8 +bServerSku.x64 =ECBEC +ulMaxDebugSessions.x64=ECBF0 +bRemoteConnAllowed.x64=ECBF4 + +[6.4.9879.0-SLInit] +bFUSEnabled.x86 =C27D8 +lMaxUserSessions.x86 =C27DC +bAppServerAllowed.x86 =C27E0 +bInitialized.x86 =C27E4 +bMultimonAllowed.x86 =C27E8 +bServerSku.x86 =C27EC +ulMaxDebugSessions.x86=C27F0 +bRemoteConnAllowed.x86=C27F4 + +bFUSEnabled.x64 =EDBF0 +lMaxUserSessions.x64 =EDBF4 +bAppServerAllowed.x64 =EDBF8 +bInitialized.x64 =EDBFC +bMultimonAllowed.x64 =EDC00 +bServerSku.x64 =EDC04 +ulMaxDebugSessions.x64=EDC08 +bRemoteConnAllowed.x64=EDC0C + +[10.0.9926.0-SLInit] +bFUSEnabled.x86 =C17D8 +lMaxUserSessions.x86 =C17DC +bAppServerAllowed.x86 =C17E0 +bInitialized.x86 =C17E4 +bMultimonAllowed.x86 =C17E8 +bServerSku.x86 =C17EC +ulMaxDebugSessions.x86=C17F0 +bRemoteConnAllowed.x86=C17F4 + +bFUSEnabled.x64 =EEBF0 +lMaxUserSessions.x64 =EEBF4 +bAppServerAllowed.x64 =EEBF8 +bInitialized.x64 =EEBFC +bMultimonAllowed.x64 =EEC00 +bServerSku.x64 =EEC04 +ulMaxDebugSessions.x64=EEC08 +bRemoteConnAllowed.x64=EEC0C + +[10.0.10041.0-SLInit] +bFUSEnabled.x86 =C5F60 +lMaxUserSessions.x86 =C5F64 +bAppServerAllowed.x86 =C5F68 +bInitialized.x86 =C5F6C +bMultimonAllowed.x86 =C5F70 +bServerSku.x86 =C5F74 +ulMaxDebugSessions.x86=C5F78 +bRemoteConnAllowed.x86=C5F7C + +bFUSEnabled.x64 =F3448 +lMaxUserSessions.x64 =F344C +bAppServerAllowed.x64 =F3450 +bInitialized.x64 =F3454 +bMultimonAllowed.x64 =F3458 +bServerSku.x64 =F345C +ulMaxDebugSessions.x64=F3460 +bRemoteConnAllowed.x64=F3464 + +[10.0.10240.16384-SLInit] +bFUSEnabled.x86 =C3F60 +lMaxUserSessions.x86 =C3F64 +bAppServerAllowed.x86 =C3F68 +bInitialized.x86 =C3F6C +bMultimonAllowed.x86 =C3F70 +bServerSku.x86 =C3F74 +ulMaxDebugSessions.x86=C3F78 +bRemoteConnAllowed.x86=C3F7C + +lMaxUserSessions.x64 =F23B0 +bAppServerAllowed.x64 =F23B4 +bServerSku.x64 =F23B8 +bFUSEnabled.x64 =F3460 +bInitialized.x64 =F3464 +bMultimonAllowed.x64 =F3468 +ulMaxDebugSessions.x64=F346C +bRemoteConnAllowed.x64=F3470 + +[10.0.10586.0-SLInit] 
+bFUSEnabled.x86 =C3F60 +lMaxUserSessions.x86 =C3F64 +bAppServerAllowed.x86 =C3F68 +bInitialized.x86 =C3F6C +bMultimonAllowed.x86 =C3F70 +bServerSku.x86 =C3F74 +ulMaxDebugSessions.x86=C3F78 +bRemoteConnAllowed.x86=C3F7C + +lMaxUserSessions.x64 =F23B0 +bAppServerAllowed.x64 =F23B4 +bServerSku.x64 =F23B8 +bFUSEnabled.x64 =F3460 +bInitialized.x64 =F3464 +bMultimonAllowed.x64 =F3468 +ulMaxDebugSessions.x64=F346C +bRemoteConnAllowed.x64=F3470 + +[10.0.10586.589-SLInit] +bFUSEnabled.x86 =C3F60 +lMaxUserSessions.x86 =C3F64 +bAppServerAllowed.x86 =C3F68 +bInitialized.x86 =C3F6C +bMultimonAllowed.x86 =C3F70 +bServerSku.x86 =C3F74 +ulMaxDebugSessions.x86=C3F78 +bRemoteConnAllowed.x86=C3F7C + +lMaxUserSessions.x64 =F23B0 +bAppServerAllowed.x64 =F23B4 +bServerSku.x64 =F23B8 +bFUSEnabled.x64 =F3460 +bInitialized.x64 =F3464 +bMultimonAllowed.x64 =F3468 +ulMaxDebugSessions.x64=F346C +bRemoteConnAllowed.x64=F3470 + +[10.0.11082.1000-SLInit] +bFUSEnabled.x86 =C3F60 +lMaxUserSessions.x86 =C3F64 +bAppServerAllowed.x86 =C3F68 +bInitialized.x86 =C3F6C +bMultimonAllowed.x86 =C3F70 +bServerSku.x86 =C3F74 +ulMaxDebugSessions.x86=C3F78 +bRemoteConnAllowed.x86=C3F7C + +lMaxUserSessions.x64 =F23B0 +bAppServerAllowed.x64 =F23B4 +bServerSku.x64 =F23B8 +bFUSEnabled.x64 =F3460 +bInitialized.x64 =F3464 +bMultimonAllowed.x64 =F3468 +ulMaxDebugSessions.x64=F346C +bRemoteConnAllowed.x64=F3470 + +[10.0.11102.1000-SLInit] +bInitialized.x86 =C1F5C +bServerSku.x86 =C1F60 +lMaxUserSessions.x86 =C1F64 +bAppServerAllowed.x86 =C1F68 +bRemoteConnAllowed.x86=C1F6C +bMultimonAllowed.x86 =C1F70 +ulMaxDebugSessions.x86=C1F74 +bFUSEnabled.x86 =C1F78 + +bInitialized.x64 =F2430 +bRemoteConnAllowed.x64=F2434 +bMultimonAllowed.x64 =F2438 +ulMaxDebugSessions.x64=F243C +bFUSEnabled.x64 =F2440 +bServerSku.x64 =F244C +lMaxUserSessions.x64 =F2450 +bAppServerAllowed.x64 =F2454 + +[10.0.14251.1000-SLInit] +bInitialized.x86 =C1F5C +bServerSku.x86 =C1F60 +lMaxUserSessions.x86 =C1F64 +bAppServerAllowed.x86 =C1F68 
+bRemoteConnAllowed.x86=C1F6C +bMultimonAllowed.x86 =C1F70 +ulMaxDebugSessions.x86=C1F74 +bFUSEnabled.x86 =C1F78 + +bInitialized.x64 =F2430 +bRemoteConnAllowed.x64=F2434 +bMultimonAllowed.x64 =F2438 +ulMaxDebugSessions.x64=F243C +bFUSEnabled.x64 =F2440 +bServerSku.x64 =F244C +lMaxUserSessions.x64 =F2450 +bAppServerAllowed.x64 =F2454 + +[10.0.14271.1000-SLInit] +bInitialized.x86 =C0F5C +bServerSku.x86 =C0F60 +lMaxUserSessions.x86 =C0F64 +bAppServerAllowed.x86 =C0F68 +bRemoteConnAllowed.x86=C0F6C +bMultimonAllowed.x86 =C0F70 +ulMaxDebugSessions.x86=C0F74 +bFUSEnabled.x86 =C0F78 + +bServerSku.x64 =EF3C0 +lMaxUserSessions.x64 =EF3C4 +bAppServerAllowed.x64 =EF3C8 +bInitialized.x64 =F0460 +bRemoteConnAllowed.x64=F0464 +bMultimonAllowed.x64 =F0468 +ulMaxDebugSessions.x64=F046C +bFUSEnabled.x64 =F0470 + +[10.0.14279.1000-SLInit] +bInitialized.x86 =C0F5C +bServerSku.x86 =C0F60 +lMaxUserSessions.x86 =C0F64 +bAppServerAllowed.x86 =C0F68 +bRemoteConnAllowed.x86=C0F6C +bMultimonAllowed.x86 =C0F70 +ulMaxDebugSessions.x86=C0F74 +bFUSEnabled.x86 =C0F78 + +bServerSku.x64 =EF3C0 +lMaxUserSessions.x64 =EF3C4 +bAppServerAllowed.x64 =EF3C8 +bInitialized.x64 =F0460 +bRemoteConnAllowed.x64=F0464 +bMultimonAllowed.x64 =F0468 +ulMaxDebugSessions.x64=F046C +bFUSEnabled.x64 =F0470 + +[10.0.14295.1000-SLInit] +bInitialized.x86 =C0F5C +bServerSku.x86 =C0F60 +lMaxUserSessions.x86 =C0F64 +bAppServerAllowed.x86 =C0F68 +bRemoteConnAllowed.x86=C0F6C +bMultimonAllowed.x86 =C0F70 +ulMaxDebugSessions.x86=C0F74 +bFUSEnabled.x86 =C0F78 + +bServerSku.x64 =E73C0 +lMaxUserSessions.x64 =E73C4 +bAppServerAllowed.x64 =E73C8 +bInitialized.x64 =E8460 +bRemoteConnAllowed.x64=E8464 +bMultimonAllowed.x64 =E8468 +ulMaxDebugSessions.x64=E846C +bFUSEnabled.x64 =E8470 + +[10.0.14300.1000-SLInit] +bServerSku.x64 =E93C0 +lMaxUserSessions.x64 =E93C4 +bAppServerAllowed.x64 =E93C8 +bInitialized.x64 =EA460 +bRemoteConnAllowed.x64=EA464 +bMultimonAllowed.x64 =EA468 +ulMaxDebugSessions.x64=EA46C +bFUSEnabled.x64 =EA470 + 
+[10.0.14316.1000-SLInit] +bInitialized.x86 =C4F58 +bServerSku.x86 =C4F5C +lMaxUserSessions.x86 =C4F60 +bAppServerAllowed.x86 =C4F64 +bRemoteConnAllowed.x86=C4F68 +bMultimonAllowed.x86 =C4F6C +ulMaxDebugSessions.x86=C4F70 +bFUSEnabled.x86 =C4F74 + +bServerSku.x64 =E93C0 +lMaxUserSessions.x64 =E93C4 +bAppServerAllowed.x64 =E93C8 +bInitialized.x64 =EA460 +bRemoteConnAllowed.x64=EA464 +bMultimonAllowed.x64 =EA468 +ulMaxDebugSessions.x64=EA46C +bFUSEnabled.x64 =EA470 + +[10.0.14328.1000-SLInit] +bInitialized.x86 =C4F58 +bServerSku.x86 =C4F5C +lMaxUserSessions.x86 =C4F60 +bAppServerAllowed.x86 =C4F64 +bRemoteConnAllowed.x86=C4F68 +bMultimonAllowed.x86 =C4F6C +ulMaxDebugSessions.x86=C4F70 +bFUSEnabled.x86 =C4F74 + +bServerSku.x64 =E93C0 +lMaxUserSessions.x64 =E93C4 +bAppServerAllowed.x64 =E93C8 +bInitialized.x64 =EA460 +bRemoteConnAllowed.x64=EA464 +bMultimonAllowed.x64 =EA468 +ulMaxDebugSessions.x64=EA46C +bFUSEnabled.x64 =EA470 + +[10.0.14332.1001-SLInit] +bInitialized.x86 =C4F58 +bServerSku.x86 =C4F5C +lMaxUserSessions.x86 =C4F60 +bAppServerAllowed.x86 =C4F64 +bRemoteConnAllowed.x86=C4F68 +bMultimonAllowed.x86 =C4F6C +ulMaxDebugSessions.x86=C4F70 +bFUSEnabled.x86 =C4F74 + +bServerSku.x64 =E93C0 +lMaxUserSessions.x64 =E93C4 +bAppServerAllowed.x64 =E93C8 +bInitialized.x64 =EA460 +bRemoteConnAllowed.x64=EA464 +bMultimonAllowed.x64 =EA468 +ulMaxDebugSessions.x64=EA46C +bFUSEnabled.x64 =EA470 + +[10.0.14342.1000-SLInit] +bInitialized.x86 =C4F58 +bServerSku.x86 =C4F5C +lMaxUserSessions.x86 =C4F60 +bAppServerAllowed.x86 =C4F64 +bRemoteConnAllowed.x86=C4F68 +bMultimonAllowed.x86 =C4F6C +ulMaxDebugSessions.x86=C4F70 +bFUSEnabled.x86 =C4F74 + +bInitialized.x64 =E9430 +bRemoteConnAllowed.x64=E9434 +bMultimonAllowed.x64 =E9438 +ulMaxDebugSessions.x64=E943C +bFUSEnabled.x64 =E9440 +bServerSku.x64 =E944C +lMaxUserSessions.x64 =E9450 +bAppServerAllowed.x64 =E9454 + +[10.0.14352.1002-SLInit] +bInitialized.x86 =C0F5C +bServerSku.x86 =C0F60 +lMaxUserSessions.x86 =C0F64 
+bAppServerAllowed.x86 =C0F68 +bRemoteConnAllowed.x86=C0F6C +bMultimonAllowed.x86 =C0F70 +ulMaxDebugSessions.x86=C0F74 +bFUSEnabled.x86 =C0F78 + +bServerSku.x64 =E73C0 +lMaxUserSessions.x64 =E73C4 +bAppServerAllowed.x64 =E73C8 +bInitialized.x64 =E8460 +bRemoteConnAllowed.x64=E8464 +bMultimonAllowed.x64 =E8468 +ulMaxDebugSessions.x64=E846C +bFUSEnabled.x64 =E8470 + +[10.0.14366.0-SLInit] +bInitialized.x86 =C4F68 +bServerSku.x86 =C4F6C +lMaxUserSessions.x86 =C4F70 +bAppServerAllowed.x86 =C4F74 +bRemoteConnAllowed.x86=C4F78 +bMultimonAllowed.x86 =C4F7C +ulMaxDebugSessions.x86=C4F80 +bFUSEnabled.x86 =C4F84 + +bServerSku.x64 =E93E0 +lMaxUserSessions.x64 =E93E4 +bAppServerAllowed.x64 =E93E8 +bInitialized.x64 =EA480 +bRemoteConnAllowed.x64=EA484 +bMultimonAllowed.x64 =EA488 +ulMaxDebugSessions.x64=EA48C +bFUSEnabled.x64 =EA490 + +[10.0.14367.0-SLInit] +bInitialized.x86 =C4F68 +bServerSku.x86 =C4F6C +lMaxUserSessions.x86 =C4F70 +bAppServerAllowed.x86 =C4F74 +bRemoteConnAllowed.x86=C4F78 +bMultimonAllowed.x86 =C4F7C +ulMaxDebugSessions.x86=C4F80 +bFUSEnabled.x86 =C4F84 + +bServerSku.x64 =E93E0 +lMaxUserSessions.x64 =E93E4 +bAppServerAllowed.x64 =E93E8 +bInitialized.x64 =EA480 +bRemoteConnAllowed.x64=EA484 +bMultimonAllowed.x64 =EA488 +ulMaxDebugSessions.x64=EA48C +bFUSEnabled.x64 =EA490 + +[10.0.14372.0-SLInit] +bInitialized.x86 =C3F68 +bServerSku.x86 =C3F6C +lMaxUserSessions.x86 =C3F70 +bAppServerAllowed.x86 =C3F74 +bRemoteConnAllowed.x86=C3F78 +bMultimonAllowed.x86 =C3F7C +ulMaxDebugSessions.x86=C3F80 +bFUSEnabled.x86 =C3F84 + +bInitialized.x64 =EA460 +bRemoteConnAllowed.x64=EA464 +bMultimonAllowed.x64 =EA468 +ulMaxDebugSessions.x64=EA46C +bFUSEnabled.x64 =EA470 +bServerSku.x64 =EA47C +lMaxUserSessions.x64 =EA480 +bAppServerAllowed.x64 =EA484 + +[10.0.14379.0-SLInit] +bInitialized.x86 =C3F68 +bServerSku.x86 =C3F6C +lMaxUserSessions.x86 =C3F70 +bAppServerAllowed.x86 =C3F74 +bRemoteConnAllowed.x86=C3F78 +bMultimonAllowed.x86 =C3F7C +ulMaxDebugSessions.x86=C3F80 
+bFUSEnabled.x86 =C3F84 + +bInitialized.x64 =EA460 +bRemoteConnAllowed.x64=EA464 +bMultimonAllowed.x64 =EA468 +ulMaxDebugSessions.x64=EA46C +bFUSEnabled.x64 =EA470 +bServerSku.x64 =EA47C +lMaxUserSessions.x64 =EA480 +bAppServerAllowed.x64 =EA484 + +[10.0.14383.0-SLInit] +bInitialized.x86 =C3F68 +bServerSku.x86 =C3F6C +lMaxUserSessions.x86 =C3F70 +bAppServerAllowed.x86 =C3F74 +bRemoteConnAllowed.x86=C3F78 +bMultimonAllowed.x86 =C3F7C +ulMaxDebugSessions.x86=C3F80 +bFUSEnabled.x86 =C3F84 + +bInitialized.x64 =EA460 +bRemoteConnAllowed.x64=EA464 +bMultimonAllowed.x64 =EA468 +ulMaxDebugSessions.x64=EA46C +bFUSEnabled.x64 =EA470 +bServerSku.x64 =EA47C +lMaxUserSessions.x64 =EA480 +bAppServerAllowed.x64 =EA484 + +[10.0.14385.0-SLInit] +bInitialized.x86 =C3F68 +bServerSku.x86 =C3F6C +lMaxUserSessions.x86 =C3F70 +bAppServerAllowed.x86 =C3F74 +bRemoteConnAllowed.x86=C3F78 +bMultimonAllowed.x86 =C3F7C +ulMaxDebugSessions.x86=C3F80 +bFUSEnabled.x86 =C3F84 + +bInitialized.x64 =EA460 +bRemoteConnAllowed.x64=EA464 +bMultimonAllowed.x64 =EA468 +ulMaxDebugSessions.x64=EA46C +bFUSEnabled.x64 =EA470 +bServerSku.x64 =EA47C +lMaxUserSessions.x64 =EA480 +bAppServerAllowed.x64 =EA484 + +[10.0.14388.0-SLInit] +bInitialized.x86 =C1F6C +bServerSku.x86 =C1F70 +lMaxUserSessions.x86 =C1F74 +bAppServerAllowed.x86 =C1F78 +bRemoteConnAllowed.x86=C1F7C +bMultimonAllowed.x86 =C1F80 +ulMaxDebugSessions.x86=C1F84 +bFUSEnabled.x86 =C1F88 + +bServerSku.x64 =E73D0 +lMaxUserSessions.x64 =E73D4 +bAppServerAllowed.x64 =E73D8 +bInitialized.x64 =E8470 +bRemoteConnAllowed.x64=E8474 +bMultimonAllowed.x64 =E8478 +ulMaxDebugSessions.x64=E847C +bFUSEnabled.x64 =E8480 + +[10.0.14393.0-SLInit] +bInitialized.x86 =C1F6C +bServerSku.x86 =C1F70 +lMaxUserSessions.x86 =C1F74 +bAppServerAllowed.x86 =C1F78 +bRemoteConnAllowed.x86=C1F7C +bMultimonAllowed.x86 =C1F80 +ulMaxDebugSessions.x86=C1F84 +bFUSEnabled.x86 =C1F88 + +bServerSku.x64 =E73D0 +lMaxUserSessions.x64 =E73D4 +bAppServerAllowed.x64 =E73D8 +bInitialized.x64 
=E8470 +bRemoteConnAllowed.x64=E8474 +bMultimonAllowed.x64 =E8478 +ulMaxDebugSessions.x64=E847C +bFUSEnabled.x64 =E8480 + +[10.0.14393.1198-SLInit] +bInitialized.x86 =C1F6C +bServerSku.x86 =C1F70 +lMaxUserSessions.x86 =C1F74 +bAppServerAllowed.x86 =C1F78 +bRemoteConnAllowed.x86=C1F7C +bMultimonAllowed.x86 =C1F80 +ulMaxDebugSessions.x86=C1F84 +bFUSEnabled.x86 =C1F88 + +[10.0.14393.1737-SLInit] +bInitialized.x86 =C1F6C +bServerSku.x86 =C1F70 +lMaxUserSessions.x86 =C1F74 +bAppServerAllowed.x86 =C1F78 +bRemoteConnAllowed.x86=C1F7C +bMultimonAllowed.x86 =C1F80 +ulMaxDebugSessions.x86=C1F84 +bFUSEnabled.x86 =C1F88 + +bServerSku.x64 =E73D0 +lMaxUserSessions.x64 =E73D4 +bAppServerAllowed.x64 =E73D8 +bInitialized.x64 =E8470 +bRemoteConnAllowed.x64=E8474 +bMultimonAllowed.x64 =E8478 +ulMaxDebugSessions.x64=E847C +bFUSEnabled.x64 =E8480 + +[10.0.14393.2457-SLInit] +bInitialized.x86 =C1F94 +bServerSku.x86 =C1F98 +lMaxUserSessions.x86 =C1F9C +bAppServerAllowed.x86 =C1FA0 +bRemoteConnAllowed.x86=C1FA4 +bMultimonAllowed.x86 =C1FA8 +ulMaxDebugSessions.x86=C1FAC +bFUSEnabled.x86 =C1FB0 + +bServerSku.x64 =E73D0 +lMaxUserSessions.x64 =E73D4 +bAppServerAllowed.x64 =E73D8 +bInitialized.x64 =E8470 +bRemoteConnAllowed.x64=E8474 +bMultimonAllowed.x64 =E8478 +ulMaxDebugSessions.x64=E847C +bFUSEnabled.x64 =E8480 + +[10.0.14901.1000-SLInit] +bInitialized.x86 =C1F6C +bServerSku.x86 =C1F70 +lMaxUserSessions.x86 =C1F74 +bAppServerAllowed.x86 =C1F78 +bRemoteConnAllowed.x86=C1F7C +bMultimonAllowed.x86 =C1F80 +ulMaxDebugSessions.x86=C1F84 +bFUSEnabled.x86 =C1F88 + +bServerSku.x64 =E73D0 +lMaxUserSessions.x64 =E73D4 +bAppServerAllowed.x64 =E73D8 +bInitialized.x64 =E8470 +bRemoteConnAllowed.x64=E8474 +bMultimonAllowed.x64 =E8478 +ulMaxDebugSessions.x64=E847C +bFUSEnabled.x64 =E8480 + +[10.0.14905.1000-SLInit] +bInitialized.x86 =C1F6C +bServerSku.x86 =C1F70 +lMaxUserSessions.x86 =C1F74 +bAppServerAllowed.x86 =C1F78 +bRemoteConnAllowed.x86=C1F7C +bMultimonAllowed.x86 =C1F80 
+ulMaxDebugSessions.x86=C1F84 +bFUSEnabled.x86 =C1F88 + +bServerSku.x64 =E73D0 +lMaxUserSessions.x64 =E73D4 +bAppServerAllowed.x64 =E73D8 +bInitialized.x64 =E8470 +bRemoteConnAllowed.x64=E8474 +bMultimonAllowed.x64 =E8478 +ulMaxDebugSessions.x64=E847C +bFUSEnabled.x64 =E8480 + +[10.0.14915.1000-SLInit] +bInitialized.x86 =C4F6C +bServerSku.x86 =C4F70 +lMaxUserSessions.x86 =C4F74 +bAppServerAllowed.x86 =C4F78 +bRemoteConnAllowed.x86=C4F7C +bMultimonAllowed.x86 =C4F80 +ulMaxDebugSessions.x86=C4F84 +bFUSEnabled.x86 =C4F88 + +bServerSku.x64 =E93D0 +lMaxUserSessions.x64 =E93D4 +bAppServerAllowed.x64 =E93D8 +bInitialized.x64 =EA470 +bRemoteConnAllowed.x64=EA474 +bMultimonAllowed.x64 =EA478 +ulMaxDebugSessions.x64=EA47C +bFUSEnabled.x64 =EA480 + +[10.0.14926.1000-SLInit] +bInitialized.x86 =C4F6C +bServerSku.x86 =C4F70 +lMaxUserSessions.x86 =C4F74 +bAppServerAllowed.x86 =C4F78 +bRemoteConnAllowed.x86=C4F7C +bMultimonAllowed.x86 =C4F80 +ulMaxDebugSessions.x86=C4F84 +bFUSEnabled.x86 =C4F88 + +bServerSku.x64 =E93D0 +lMaxUserSessions.x64 =E93D4 +bAppServerAllowed.x64 =E93D8 +bInitialized.x64 =EA470 +bRemoteConnAllowed.x64=EA474 +bMultimonAllowed.x64 =EA478 +ulMaxDebugSessions.x64=EA47C +bFUSEnabled.x64 =EA480 + +[10.0.14931.1000-SLInit] +bInitialized.x86 =C1F6C +bServerSku.x86 =C1F70 +lMaxUserSessions.x86 =C1F74 +bAppServerAllowed.x86 =C1F78 +bRemoteConnAllowed.x86=C1F7C +bMultimonAllowed.x86 =C1F80 +ulMaxDebugSessions.x86=C1F84 +bFUSEnabled.x86 =C1F88 + +bServerSku.x64 =E63D0 +lMaxUserSessions.x64 =E63D4 +bAppServerAllowed.x64 =E63D8 +bInitialized.x64 =E7470 +bRemoteConnAllowed.x64=E7474 +bMultimonAllowed.x64 =E7478 +ulMaxDebugSessions.x64=E747C +bFUSEnabled.x64 =E7480 + +[10.0.14936.1000-SLInit] +bInitialized.x86 =C0F6C +bServerSku.x86 =C0F70 +lMaxUserSessions.x86 =C0F74 +bAppServerAllowed.x86 =C0F78 +bRemoteConnAllowed.x86=C0F7C +bMultimonAllowed.x86 =C0F80 +ulMaxDebugSessions.x86=C0F84 +bFUSEnabled.x86 =C0F88 + +bInitialized.x64 =E8460 +bRemoteConnAllowed.x64=E8464 
+bMultimonAllowed.x64 =E8468 +ulMaxDebugSessions.x64=E846C +bFUSEnabled.x64 =E8470 +bServerSku.x64 =E847C +lMaxUserSessions.x64 =E8480 +bAppServerAllowed.x64 =E8484 + +[10.0.14942.1000-SLInit] +bInitialized.x86 =C0F6C +bServerSku.x86 =C0F70 +lMaxUserSessions.x86 =C0F74 +bAppServerAllowed.x86 =C0F78 +bRemoteConnAllowed.x86=C0F7C +bMultimonAllowed.x86 =C0F80 +ulMaxDebugSessions.x86=C0F84 +bFUSEnabled.x86 =C0F88 + +bInitialized.x64 =EC460 +bRemoteConnAllowed.x64=EC464 +bMultimonAllowed.x64 =EC468 +ulMaxDebugSessions.x64=EC46C +bFUSEnabled.x64 =EC470 +bServerSku.x64 =EC47C +lMaxUserSessions.x64 =EC480 +bAppServerAllowed.x64 =EC484 + +[10.0.14946.1000-SLInit] +bInitialized.x86 =C0F6C +bServerSku.x86 =C0F70 +lMaxUserSessions.x86 =C0F74 +bAppServerAllowed.x86 =C0F78 +bRemoteConnAllowed.x86=C0F7C +bMultimonAllowed.x86 =C0F80 +ulMaxDebugSessions.x86=C0F84 +bFUSEnabled.x86 =C0F88 + +bInitialized.x64 =EC460 +bRemoteConnAllowed.x64=EC464 +bMultimonAllowed.x64 =EC468 +ulMaxDebugSessions.x64=EC46C +bFUSEnabled.x64 =EC470 +bServerSku.x64 =EC47C +lMaxUserSessions.x64 =EC480 +bAppServerAllowed.x64 =EC484 + +[10.0.14951.1000-SLInit] +bInitialized.x86 =C5F68 +bServerSku.x86 =C5F6C +lMaxUserSessions.x86 =C5F70 +bAppServerAllowed.x86 =C5F74 +bRemoteConnAllowed.x86=C5F78 +bMultimonAllowed.x86 =C5F7C +ulMaxDebugSessions.x86=C5F80 +bFUSEnabled.x86 =C5F84 + +bServerSku.x64 =EF3D0 +lMaxUserSessions.x64 =EF3D4 +bAppServerAllowed.x64 =EF3D8 +bInitialized.x64 =F0470 +bRemoteConnAllowed.x64=F0474 +bMultimonAllowed.x64 =F0478 +ulMaxDebugSessions.x64=F047C +bFUSEnabled.x64 =F0480 + +[10.0.14955.1000-SLInit] +bInitialized.x86 =C5F68 +bServerSku.x86 =C5F6C +lMaxUserSessions.x86 =C5F70 +bAppServerAllowed.x86 =C5F74 +bRemoteConnAllowed.x86=C5F78 +bMultimonAllowed.x86 =C5F7C +ulMaxDebugSessions.x86=C5F80 +bFUSEnabled.x86 =C5F84 + +bServerSku.x64 =EF3D0 +lMaxUserSessions.x64 =EF3D4 +bAppServerAllowed.x64 =EF3D8 +bInitialized.x64 =F0470 +bRemoteConnAllowed.x64=F0474 +bMultimonAllowed.x64 =F0478 
+ulMaxDebugSessions.x64=F047C +bFUSEnabled.x64 =F0480 + +[10.0.14959.1000-SLInit] +bInitialized.x86 =C4F68 +bServerSku.x86 =C4F6C +lMaxUserSessions.x86 =C4F70 +bAppServerAllowed.x86 =C4F74 +bRemoteConnAllowed.x86=C4F78 +bMultimonAllowed.x86 =C4F7C +ulMaxDebugSessions.x86=C4F80 +bFUSEnabled.x86 =C4F84 + +bServerSku.x64 =EE3D0 +lMaxUserSessions.x64 =EE3D4 +bAppServerAllowed.x64 =EE3D8 +bInitialized.x64 =EF470 +bRemoteConnAllowed.x64=EF474 +bMultimonAllowed.x64 =EF478 +ulMaxDebugSessions.x64=EF47C +bFUSEnabled.x64 =EF480 + +[10.0.14965.1001-SLInit] +bInitialized.x86 =C5F68 +bServerSku.x86 =C5F6C +lMaxUserSessions.x86 =C5F70 +bAppServerAllowed.x86 =C5F74 +bRemoteConnAllowed.x86=C5F78 +bMultimonAllowed.x86 =C5F7C +ulMaxDebugSessions.x86=C5F80 +bFUSEnabled.x86 =C5F84 + +bInitialized.x64 =EF460 +bRemoteConnAllowed.x64=EF464 +bMultimonAllowed.x64 =EF468 +ulMaxDebugSessions.x64=EF46C +bFUSEnabled.x64 =EF470 +bServerSku.x64 =EF47C +lMaxUserSessions.x64 =EF480 +bAppServerAllowed.x64 =EF484 + +[10.0.14971.1000-SLInit] +bInitialized.x86 =C5F68 +bServerSku.x86 =C5F6C +lMaxUserSessions.x86 =C5F70 +bAppServerAllowed.x86 =C5F74 +bRemoteConnAllowed.x86=C5F78 +bMultimonAllowed.x86 =C5F7C +ulMaxDebugSessions.x86=C5F80 +bFUSEnabled.x86 =C5F84 + +bServerSku.x64 =EE3C0 +lMaxUserSessions.x64 =EE3C4 +bAppServerAllowed.x64 =EE3C8 +bInitialized.x64 =EE470 +bRemoteConnAllowed.x64=EE474 +bMultimonAllowed.x64 =EE478 +ulMaxDebugSessions.x64=EE47C +bFUSEnabled.x64 =EE480 + +[10.0.14986.1000-SLInit] +bInitialized.x86 =C5F68 +bServerSku.x86 =C5F6C +lMaxUserSessions.x86 =C5F70 +bAppServerAllowed.x86 =C5F74 +bRemoteConnAllowed.x86=C5F78 +bMultimonAllowed.x86 =C5F7C +ulMaxDebugSessions.x86=C5F80 +bFUSEnabled.x86 =C5F84 + +bServerSku.x64 =EE3C0 +lMaxUserSessions.x64 =EE3C4 +bAppServerAllowed.x64 =EE3C8 +bInitialized.x64 =EE470 +bRemoteConnAllowed.x64=EE474 +bMultimonAllowed.x64 =EE478 +ulMaxDebugSessions.x64=EE47C +bFUSEnabled.x64 =EE480 + +[10.0.14997.1001-SLInit] +bServerSku.x64 =F0408 
+lMaxUserSessions.x64 =F040C +bAppServerAllowed.x64 =F0410 +bInitialized.x64 =F0480 +bRemoteConnAllowed.x64=F0484 +bMultimonAllowed.x64 =F0488 +ulMaxDebugSessions.x64=F048C +bFUSEnabled.x64 =F0490 + +[10.0.15002.1001-SLInit] +bInitialized.x86 =C6F74 +bServerSku.x86 =C6F78 +lMaxUserSessions.x86 =C6F7C +bAppServerAllowed.x86 =C6F80 +bRemoteConnAllowed.x86=C6F84 +bMultimonAllowed.x86 =C6F88 +ulMaxDebugSessions.x86=C6F8C +bFUSEnabled.x86 =C6F90 + +bServerSku.x64 =F0408 +lMaxUserSessions.x64 =F040C +bAppServerAllowed.x64 =F0410 +bInitialized.x64 =F0480 +bRemoteConnAllowed.x64=F0484 +bMultimonAllowed.x64 =F0488 +ulMaxDebugSessions.x64=F048C +bFUSEnabled.x64 =F0490 + +[10.0.15007.1000-SLInit] +bInitialized.x86 =C6F74 +bServerSku.x86 =C6F78 +lMaxUserSessions.x86 =C6F7C +bAppServerAllowed.x86 =C6F80 +bRemoteConnAllowed.x86=C6F84 +bMultimonAllowed.x86 =C6F88 +ulMaxDebugSessions.x86=C6F8C +bFUSEnabled.x86 =C6F90 + +bServerSku.x64 =F0408 +lMaxUserSessions.x64 =F040C +bAppServerAllowed.x64 =F0410 +bInitialized.x64 =F0480 +bRemoteConnAllowed.x64=F0484 +bMultimonAllowed.x64 =F0488 +ulMaxDebugSessions.x64=F048C +bFUSEnabled.x64 =F0490 + +[10.0.15014.1000-SLInit] +bInitialized.x86 =C6F74 +bServerSku.x86 =C6F78 +lMaxUserSessions.x86 =C6F7C +bAppServerAllowed.x86 =C6F80 +bRemoteConnAllowed.x86=C6F84 +bMultimonAllowed.x86 =C6F88 +ulMaxDebugSessions.x86=C6F8C +bFUSEnabled.x86 =C6F90 + +bServerSku.x64 =F0408 +lMaxUserSessions.x64 =F040C +bAppServerAllowed.x64 =F0410 +bInitialized.x64 =F0480 +bRemoteConnAllowed.x64=F0484 +bMultimonAllowed.x64 =F0488 +ulMaxDebugSessions.x64=F048C +bFUSEnabled.x64 =F0490 + +[10.0.15019.1000-SLInit] +bInitialized.x86 =C5F68 +bServerSku.x86 =C5F6C +lMaxUserSessions.x86 =C5F70 +bAppServerAllowed.x86 =C5F74 +bRemoteConnAllowed.x86=C5F78 +bMultimonAllowed.x86 =C5F7C +ulMaxDebugSessions.x86=C5F80 +bFUSEnabled.x86 =C5F84 + +bServerSku.x64 =ECBDC +lMaxUserSessions.x64 =ECBE0 +bAppServerAllowed.x64 =ECBE4 +bInitialized.x64 =F0490 +bRemoteConnAllowed.x64=F0494 
+bMultimonAllowed.x64 =F0498 +ulMaxDebugSessions.x64=F049C +bFUSEnabled.x64 =F04A0 + +[10.0.15025.1000-SLInit] +bServerSku.x64 =EE3E0 +lMaxUserSessions.x64 =EE3E4 +bAppServerAllowed.x64 =EE3E8 +bInitialized.x64 =EF488 +bRemoteConnAllowed.x64=EF48C +bMultimonAllowed.x64 =EF490 +ulMaxDebugSessions.x64=EF494 +bFUSEnabled.x64 =EF498 + +[10.0.15031.0-SLInit] +bInitialized.x86 =C2F6C +bServerSku.x86 =C2F70 +lMaxUserSessions.x86 =C2F74 +bAppServerAllowed.x86 =C2F78 +bRemoteConnAllowed.x86=C2F7C +bMultimonAllowed.x86 =C2F80 +ulMaxDebugSessions.x86=C2F84 +bFUSEnabled.x86 =C2F88 + +bServerSku.x64 =E93E0 +lMaxUserSessions.x64 =E93E4 +bAppServerAllowed.x64 =E93E8 +bInitialized.x64 =EA488 +bRemoteConnAllowed.x64=EA48C +bMultimonAllowed.x64 =EA490 +ulMaxDebugSessions.x64=EA494 +bFUSEnabled.x64 =EA498 + +[10.0.15042.0-SLInit] +bInitialized.x86 =C2F6C +bServerSku.x86 =C2F70 +lMaxUserSessions.x86 =C2F74 +bAppServerAllowed.x86 =C2F78 +bRemoteConnAllowed.x86=C2F7C +bMultimonAllowed.x86 =C2F80 +ulMaxDebugSessions.x86=C2F84 +bFUSEnabled.x86 =C2F88 + +bServerSku.x64 =E93E0 +lMaxUserSessions.x64 =E93E4 +bAppServerAllowed.x64 =E93E8 +bInitialized.x64 =EA488 +bRemoteConnAllowed.x64=EA48C +bMultimonAllowed.x64 =EA490 +ulMaxDebugSessions.x64=EA494 +bFUSEnabled.x64 =EA498 + +[10.0.15046.0-SLInit] +bInitialized.x86 =C4F18 +bServerSku.x86 =C4F1C +lMaxUserSessions.x86 =C4F20 +bAppServerAllowed.x86 =C4F24 +bRemoteConnAllowed.x86=C4F28 +ulMaxDebugSessions.x86=C4F2C +bMultimonAllowed.x86 =C5010 +bFUSEnabled.x86 =C5014 + +bInitialized.x64 =EB468 +bRemoteConnAllowed.x64=EB46C +bMultimonAllowed.x64 =EB470 +ulMaxDebugSessions.x64=EB474 +bFUSEnabled.x64 =EB478 +bServerSku.x64 =EB484 +lMaxUserSessions.x64 =EB488 +bAppServerAllowed.x64 =EB48C + +[10.0.15048.0-SLInit] +bInitialized.x86 =C4F18 +bServerSku.x86 =C4F1C +lMaxUserSessions.x86 =C4F20 +bAppServerAllowed.x86 =C4F24 +bRemoteConnAllowed.x86=C4F28 +ulMaxDebugSessions.x86=C4F2C +bMultimonAllowed.x86 =C5010 +bFUSEnabled.x86 =C5014 + +bInitialized.x64 
=EB468 +bRemoteConnAllowed.x64=EB46C +bMultimonAllowed.x64 =EB470 +ulMaxDebugSessions.x64=EB474 +bFUSEnabled.x64 =EB478 +bServerSku.x64 =EB484 +lMaxUserSessions.x64 =EB488 +bAppServerAllowed.x64 =EB48C + +[10.0.15055.0-SLInit] +bInitialized.x86 =C2F70 +bServerSku.x86 =C2F74 +lMaxUserSessions.x86 =C2F78 +bAppServerAllowed.x86 =C2F7C +bRemoteConnAllowed.x86=C2F80 +bMultimonAllowed.x86 =C2F84 +ulMaxDebugSessions.x86=C2F88 +bFUSEnabled.x86 =C2F8C + +bServerSku.x64 =E83D8 +lMaxUserSessions.x64 =E83DC +bAppServerAllowed.x64 =E83E0 +bInitialized.x64 =E9490 +bRemoteConnAllowed.x64=E9494 +bMultimonAllowed.x64 =E9498 +ulMaxDebugSessions.x64=E949C +bFUSEnabled.x64 =E94A0 + +[10.0.15058.0-SLInit] +bInitialized.x86 =C2F70 +bServerSku.x86 =C2F74 +lMaxUserSessions.x86 =C2F78 +bAppServerAllowed.x86 =C2F7C +bRemoteConnAllowed.x86=C2F80 +bMultimonAllowed.x86 =C2F84 +ulMaxDebugSessions.x86=C2F88 +bFUSEnabled.x86 =C2F8C + +bInitialized.x64 =E9468 +bRemoteConnAllowed.x64=E946C +bMultimonAllowed.x64 =E9470 +ulMaxDebugSessions.x64=E9474 +bFUSEnabled.x64 =E9478 +bServerSku.x64 =E9484 +lMaxUserSessions.x64 =E9488 +bAppServerAllowed.x64 =E948C + +[10.0.15061.0-SLInit] +bInitialized.x86 =C2F70 +bServerSku.x86 =C2F74 +lMaxUserSessions.x86 =C2F78 +bAppServerAllowed.x86 =C2F7C +bRemoteConnAllowed.x86=C2F80 +bMultimonAllowed.x86 =C2F84 +ulMaxDebugSessions.x86=C2F88 +bFUSEnabled.x86 =C2F8C + +bInitialized.x64 =E9468 +bRemoteConnAllowed.x64=E946C +bMultimonAllowed.x64 =E9470 +ulMaxDebugSessions.x64=E9474 +bFUSEnabled.x64 =E9478 +bServerSku.x64 =E9484 +lMaxUserSessions.x64 =E9488 +bAppServerAllowed.x64 =E948C + +[10.0.15063.0-SLInit] +bInitialized.x86 =C2F70 +bServerSku.x86 =C2F74 +lMaxUserSessions.x86 =C2F78 +bAppServerAllowed.x86 =C2F7C +bRemoteConnAllowed.x86=C2F80 +bMultimonAllowed.x86 =C2F84 +ulMaxDebugSessions.x86=C2F88 +bFUSEnabled.x86 =C2F8C + +bInitialized.x64 =E9468 +bRemoteConnAllowed.x64=E946C +bMultimonAllowed.x64 =E9470 +ulMaxDebugSessions.x64=E9474 +bFUSEnabled.x64 =E9478 
+bServerSku.x64 =E9484 +lMaxUserSessions.x64 =E9488 +bAppServerAllowed.x64 =E948C + +[10.0.15063.296-SLInit] +bInitialized.x86 =C2F70 +bServerSku.x86 =C2F74 +lMaxUserSessions.x86 =C2F78 +bAppServerAllowed.x86 =C2F7C +bRemoteConnAllowed.x86=C2F80 +bMultimonAllowed.x86 =C2F84 +ulMaxDebugSessions.x86=C2F88 +bFUSEnabled.x86 =C2F8C + +bInitialized.x64 =E9468 +bRemoteConnAllowed.x64=E946C +bMultimonAllowed.x64 =E9470 +ulMaxDebugSessions.x64=E9474 +bFUSEnabled.x64 =E9478 +bServerSku.x64 =E9484 +lMaxUserSessions.x64 =E9488 +bAppServerAllowed.x64 =E948C + +[10.0.15063.994-SLInit] +bInitialized.x64 =E9468 +bRemoteConnAllowed.x64=E946C +bMultimonAllowed.x64 =E9470 +ulMaxDebugSessions.x64=E9474 +bFUSEnabled.x64 =E9478 +bServerSku.x64 =E9484 +lMaxUserSessions.x64 =E9488 +bAppServerAllowed.x64 =E948C + +[10.0.15063.1155-SLInit] +bInitialized.x64 =E9468 +bRemoteConnAllowed.x64=E946C +bMultimonAllowed.x64 =E9470 +ulMaxDebugSessions.x64=E9474 +bFUSEnabled.x64 =E9478 +bServerSku.x64 =E9484 +lMaxUserSessions.x64 =E9488 +bAppServerAllowed.x64 =E948C + +[10.0.16179.1000-SLInit] +bInitialized.x86 =C7F6C +bServerSku.x86 =C7F70 +lMaxUserSessions.x86 =C7F74 +bAppServerAllowed.x86 =C7F78 +bRemoteConnAllowed.x86=C7F7C +bMultimonAllowed.x86 =C7F80 +ulMaxDebugSessions.x86=C7F84 +bFUSEnabled.x86 =C7F88 + +bServerSku.x64 =E83D8 +lMaxUserSessions.x64 =E83DC +bAppServerAllowed.x64 =E83E0 +bInitialized.x64 =E9490 +bRemoteConnAllowed.x64=E9494 +bMultimonAllowed.x64 =E9498 +ulMaxDebugSessions.x64=E949C +bFUSEnabled.x64 =E94A0 + +[10.0.16184.1001-SLInit] +bInitialized.x86 =C7F6C +bServerSku.x86 =C7F70 +lMaxUserSessions.x86 =C7F74 +bAppServerAllowed.x86 =C7F78 +bRemoteConnAllowed.x86=C7F7C +bMultimonAllowed.x86 =C7F80 +ulMaxDebugSessions.x86=C7F84 +bFUSEnabled.x86 =C7F88 + +bServerSku.x64 =E83D8 +lMaxUserSessions.x64 =E83DC +bAppServerAllowed.x64 =E83E0 +bInitialized.x64 =E9490 +bRemoteConnAllowed.x64=E9494 +bMultimonAllowed.x64 =E9498 +ulMaxDebugSessions.x64=E949C +bFUSEnabled.x64 =E94A0 + 
+[10.0.16199.1000-SLInit] +bInitialized.x86 =C8F74 +bServerSku.x86 =C8F78 +lMaxUserSessions.x86 =C8F7C +bAppServerAllowed.x86 =C8F80 +bRemoteConnAllowed.x86=C8F84 +bMultimonAllowed.x86 =C8F88 +ulMaxDebugSessions.x86=C8F8C +bFUSEnabled.x86 =C8F90 + +bServerSku.x64 =E83E8 +lMaxUserSessions.x64 =E83EC +bAppServerAllowed.x64 =E83F0 +bInitialized.x64 =E94A0 +bRemoteConnAllowed.x64=E94A4 +bMultimonAllowed.x64 =E94A8 +ulMaxDebugSessions.x64=E94AC +bFUSEnabled.x64 =E94B0 + +[10.0.16215.1000-SLInit] +bInitialized.x86 =C5F78 +bServerSku.x86 =C5F7C +lMaxUserSessions.x86 =C5F80 +bAppServerAllowed.x86 =C5F84 +bRemoteConnAllowed.x86=C5F88 +bMultimonAllowed.x86 =C5F8C +ulMaxDebugSessions.x86=C5F90 +bFUSEnabled.x86 =C5F94 + +bServerSku.x64 =EA3E8 +lMaxUserSessions.x64 =EA3EC +bAppServerAllowed.x64 =EA3F0 +bInitialized.x64 =EB4A0 +bRemoteConnAllowed.x64=EB4A4 +bMultimonAllowed.x64 =EB4A8 +ulMaxDebugSessions.x64=EB4AC +bFUSEnabled.x64 =EB4B0 + +[10.0.16232.1000-SLInit] +bInitialized.x86 =C5F78 +bServerSku.x86 =C5F7C +lMaxUserSessions.x86 =C5F80 +bAppServerAllowed.x86 =C5F84 +bRemoteConnAllowed.x86=C5F88 +bMultimonAllowed.x86 =C5F8C +ulMaxDebugSessions.x86=C5F90 +bFUSEnabled.x86 =C5F94 + +bServerSku.x64 =EA3E8 +lMaxUserSessions.x64 =EA3EC +bAppServerAllowed.x64 =EA3F0 +bInitialized.x64 =EB4A0 +bRemoteConnAllowed.x64=EB4A4 +bMultimonAllowed.x64 =EB4A8 +ulMaxDebugSessions.x64=EB4AC +bFUSEnabled.x64 =EB4B0 + +[10.0.16237.1001-SLInit] +bInitialized.x86 =C5F78 +bServerSku.x86 =C5F7C +lMaxUserSessions.x86 =C5F80 +bAppServerAllowed.x86 =C5F84 +bRemoteConnAllowed.x86=C5F88 +bMultimonAllowed.x86 =C5F8C +ulMaxDebugSessions.x86=C5F90 +bFUSEnabled.x86 =C5F94 + +bServerSku.x64 =EB3EC +lMaxUserSessions.x64 =EB3F0 +bAppServerAllowed.x64 =EB3F4 +bInitialized.x64 =EC4A0 +bRemoteConnAllowed.x64=EC4A4 +bMultimonAllowed.x64 =EC4A8 +ulMaxDebugSessions.x64=EC4AC +bFUSEnabled.x64 =EC4B0 + +[10.0.16241.1001-SLInit] +bInitialized.x86 =C5F78 +bServerSku.x86 =C5F7C +lMaxUserSessions.x86 =C5F80 
+bAppServerAllowed.x86 =C5F84 +bRemoteConnAllowed.x86=C5F88 +bMultimonAllowed.x86 =C5F8C +ulMaxDebugSessions.x86=C5F90 +bFUSEnabled.x86 =C5F94 + +bServerSku.x64 =EB3EC +lMaxUserSessions.x64 =EB3F0 +bAppServerAllowed.x64 =EB3F4 +bInitialized.x64 =EC4A0 +bRemoteConnAllowed.x64=EC4A4 +bMultimonAllowed.x64 =EC4A8 +ulMaxDebugSessions.x64=EC4AC +bFUSEnabled.x64 =EC4B0 + +[10.0.16251.0-SLInit] +bInitialized.x86 =C9F78 +bServerSku.x86 =C9F7C +lMaxUserSessions.x86 =C9F80 +bAppServerAllowed.x86 =C9F84 +bRemoteConnAllowed.x86=C9F88 +bMultimonAllowed.x86 =C9F8C +ulMaxDebugSessions.x86=C9F90 +bFUSEnabled.x86 =C9F94 + +bServerSku.x64 =EB3EC +lMaxUserSessions.x64 =EB3F0 +bAppServerAllowed.x64 =EB3F4 +bInitialized.x64 =EC4A0 +bRemoteConnAllowed.x64=EC4A4 +bMultimonAllowed.x64 =EC4A8 +ulMaxDebugSessions.x64=EC4AC +bFUSEnabled.x64 =EC4B0 + +[10.0.16251.1000-SLInit] +bInitialized.x86 =C9F78 +bServerSku.x86 =C9F7C +lMaxUserSessions.x86 =C9F80 +bAppServerAllowed.x86 =C9F84 +bRemoteConnAllowed.x86=C9F88 +bMultimonAllowed.x86 =C9F8C +ulMaxDebugSessions.x86=C9F90 +bFUSEnabled.x86 =C9F94 + +bServerSku.x64 =EB3EC +lMaxUserSessions.x64 =EB3F0 +bAppServerAllowed.x64 =EB3F4 +bInitialized.x64 =EC4A0 +bRemoteConnAllowed.x64=EC4A4 +bMultimonAllowed.x64 =EC4A8 +ulMaxDebugSessions.x64=EC4AC +bFUSEnabled.x64 =EC4B0 + +[10.0.16257.1-SLInit] +bInitialized.x86 =C9F7C +bServerSku.x86 =C9F80 +lMaxUserSessions.x86 =C9F84 +bAppServerAllowed.x86 =C9F88 +bRemoteConnAllowed.x86=C9F8C +bMultimonAllowed.x86 =C9F90 +ulMaxDebugSessions.x86=C9F94 +bFUSEnabled.x86 =C9F98 + +bServerSku.x64 =EB3F0 +lMaxUserSessions.x64 =EB3F4 +bAppServerAllowed.x64 =EB3F8 +bInitialized.x64 =EC4A0 +bRemoteConnAllowed.x64=EC4A4 +bMultimonAllowed.x64 =EC4A8 +ulMaxDebugSessions.x64=EC4AC +bFUSEnabled.x64 =EC4B0 + +[10.0.16257.1000-SLInit] +bInitialized.x86 =C9F7C +bServerSku.x86 =C9F80 +lMaxUserSessions.x86 =C9F84 +bAppServerAllowed.x86 =C9F88 +bRemoteConnAllowed.x86=C9F8C +bMultimonAllowed.x86 =C9F90 +ulMaxDebugSessions.x86=C9F94 
+bFUSEnabled.x86 =C9F98 + +bServerSku.x64 =EB3F0 +lMaxUserSessions.x64 =EB3F4 +bAppServerAllowed.x64 =EB3F8 +bInitialized.x64 =EC4A0 +bRemoteConnAllowed.x64=EC4A4 +bMultimonAllowed.x64 =EC4A8 +ulMaxDebugSessions.x64=EC4AC +bFUSEnabled.x64 =EC4B0 + +[10.0.16273.1000-SLInit] +bInitialized.x86 =C9F7C +bServerSku.x86 =C9F80 +lMaxUserSessions.x86 =C9F84 +bAppServerAllowed.x86 =C9F88 +bRemoteConnAllowed.x86=C9F8C +bMultimonAllowed.x86 =C9F90 +ulMaxDebugSessions.x86=C9F94 +bFUSEnabled.x86 =C9F98 + +bServerSku.x64 =EB3F0 +lMaxUserSessions.x64 =EB3F4 +bAppServerAllowed.x64 =EB3F8 +bInitialized.x64 =EC4A0 +bRemoteConnAllowed.x64=EC4A4 +bMultimonAllowed.x64 =EC4A8 +ulMaxDebugSessions.x64=EC4AC +bFUSEnabled.x64 =EC4B0 + +[10.0.16275.1000-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.16278.1000-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.16281.1000-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 
+bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.16288.1-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.16291.0-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.16294.1-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.16296.0-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 
=EE4B0 + +[10.0.16299.0-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.16299.15-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.16353.1000-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.16362.1000-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 +bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.17004.1000-SLInit] +bInitialized.x86 =C6F7C +bServerSku.x86 =C6F80 +lMaxUserSessions.x86 =C6F84 
+bAppServerAllowed.x86 =C6F88 +bRemoteConnAllowed.x86=C6F8C +bMultimonAllowed.x86 =C6F90 +ulMaxDebugSessions.x86=C6F94 +bFUSEnabled.x86 =C6F98 + +bServerSku.x64 =ED3E8 +lMaxUserSessions.x64 =ED3EC +bAppServerAllowed.x64 =ED3F0 +bInitialized.x64 =EE4A0 +bRemoteConnAllowed.x64=EE4A4 +bMultimonAllowed.x64 =EE4A8 +ulMaxDebugSessions.x64=EE4AC +bFUSEnabled.x64 =EE4B0 + +[10.0.17017.1000-SLInit] +bInitialized.x86 =C9EB8 +bServerSku.x86 =C9EBC +lMaxUserSessions.x86 =C9EC0 +bAppServerAllowed.x86 =C9EC4 +bRemoteConnAllowed.x86=C9EC8 +bMultimonAllowed.x86 =C9ECC +ulMaxDebugSessions.x86=C9ED0 +bFUSEnabled.x86 =C9ED4 + +bServerSku.x64 =EC2E8 +lMaxUserSessions.x64 =EC2EC +bAppServerAllowed.x64 =EC2F0 +bInitialized.x64 =ED3A0 +bRemoteConnAllowed.x64=ED3A4 +bMultimonAllowed.x64 =ED3A8 +ulMaxDebugSessions.x64=ED3AC +bFUSEnabled.x64 =ED3B0 + +[10.0.17025.1000-SLInit] +bInitialized.x86 =C9EB8 +bServerSku.x86 =C9EBC +lMaxUserSessions.x86 =C9EC0 +bAppServerAllowed.x86 =C9EC4 +bRemoteConnAllowed.x86=C9EC8 +bMultimonAllowed.x86 =C9ECC +ulMaxDebugSessions.x86=C9ED0 +bFUSEnabled.x86 =C9ED4 + +bServerSku.x64 =EC2E8 +lMaxUserSessions.x64 =EC2EC +bAppServerAllowed.x64 =EC2F0 +bInitialized.x64 =ED3A0 +bRemoteConnAllowed.x64=ED3A4 +bMultimonAllowed.x64 =ED3A8 +ulMaxDebugSessions.x64=ED3AC +bFUSEnabled.x64 =ED3B0 + +[10.0.17035.1000-SLInit] +bInitialized.x86 =C9ED8 +bServerSku.x86 =C9EDC +lMaxUserSessions.x86 =C9EE0 +bAppServerAllowed.x86 =C9EE4 +bRemoteConnAllowed.x86=C9EE8 +bMultimonAllowed.x86 =C9EEC +ulMaxDebugSessions.x86=C9EF0 +bFUSEnabled.x86 =C9EF4 + +bServerSku.x64 =EC2E8 +lMaxUserSessions.x64 =EC2EC +bAppServerAllowed.x64 =EC2F0 +bInitialized.x64 =ED3A0 +bRemoteConnAllowed.x64=ED3A4 +bMultimonAllowed.x64 =ED3A8 +ulMaxDebugSessions.x64=ED3AC +bFUSEnabled.x64 =ED3B0 + +[10.0.17046.1000-SLInit] +bInitialized.x86 =C9ED8 +bServerSku.x86 =C9EDC +lMaxUserSessions.x86 =C9EE0 +bAppServerAllowed.x86 =C9EE4 +bRemoteConnAllowed.x86=C9EE8 +bMultimonAllowed.x86 =C9EEC +ulMaxDebugSessions.x86=C9EF0 
+bFUSEnabled.x86 =C9EF4 + +bServerSku.x64 =EC2E8 +lMaxUserSessions.x64 =EC2EC +bAppServerAllowed.x64 =EC2F0 +bInitialized.x64 =ED3A0 +bRemoteConnAllowed.x64=ED3A4 +bMultimonAllowed.x64 =ED3A8 +ulMaxDebugSessions.x64=ED3AC +bFUSEnabled.x64 =ED3B0 + +[10.0.17063.1000-SLInit] +bInitialized.x86 =CBF38 +bServerSku.x86 =CBF3C +lMaxUserSessions.x86 =CBF40 +bAppServerAllowed.x86 =CBF44 +bRemoteConnAllowed.x86=CBF48 +bMultimonAllowed.x86 =CBF4C +ulMaxDebugSessions.x86=CBF50 +bFUSEnabled.x86 =CBF54 + +bServerSku.x64 =F1378 +lMaxUserSessions.x64 =F137C +bAppServerAllowed.x64 =F1380 +bInitialized.x64 =F2430 +bRemoteConnAllowed.x64=F2434 +bMultimonAllowed.x64 =F2438 +ulMaxDebugSessions.x64=F243C +bFUSEnabled.x64 =F2440 + +[10.0.17115.1-SLInit] +bInitialized.x86 =CBF38 +bServerSku.x86 =CBF3C +lMaxUserSessions.x86 =CBF40 +bAppServerAllowed.x86 =CBF44 +bRemoteConnAllowed.x86=CBF48 +bMultimonAllowed.x86 =CBF4C +ulMaxDebugSessions.x86=CBF50 +bFUSEnabled.x86 =CBF54 + +bServerSku.x64 =F1378 +lMaxUserSessions.x64 =F137C +bAppServerAllowed.x64 =F1380 +bInitialized.x64 =F2430 +bRemoteConnAllowed.x64=F2434 +bMultimonAllowed.x64 =F2438 +ulMaxDebugSessions.x64=F243C +bFUSEnabled.x64 =F2440 + +[10.0.17128.1-SLInit] +bInitialized.x86 =CBF38 +bServerSku.x86 =CBF3C +lMaxUserSessions.x86 =CBF40 +bAppServerAllowed.x86 =CBF44 +bRemoteConnAllowed.x86=CBF48 +bMultimonAllowed.x86 =CBF4C +ulMaxDebugSessions.x86=CBF50 +bFUSEnabled.x86 =CBF54 + +bServerSku.x64 =F1378 +lMaxUserSessions.x64 =F137C +bAppServerAllowed.x64 =F1380 +bInitialized.x64 =F2430 +bRemoteConnAllowed.x64=F2434 +bMultimonAllowed.x64 =F2438 +ulMaxDebugSessions.x64=F243C +bFUSEnabled.x64 =F2440 + +[10.0.17133.1-SLInit] +bInitialized.x86 =CBF38 +bServerSku.x86 =CBF3C +lMaxUserSessions.x86 =CBF40 +bAppServerAllowed.x86 =CBF44 +bRemoteConnAllowed.x86=CBF48 +bMultimonAllowed.x86 =CBF4C +ulMaxDebugSessions.x86=CBF50 +bFUSEnabled.x86 =CBF54 + +bServerSku.x64 =F1378 +lMaxUserSessions.x64 =F137C +bAppServerAllowed.x64 =F1380 +bInitialized.x64 
=F2430 +bRemoteConnAllowed.x64=F2434 +bMultimonAllowed.x64 =F2438 +ulMaxDebugSessions.x64=F243C +bFUSEnabled.x64 =F2440 + +[10.0.17134.1-SLInit] +bInitialized.x86 =CBF38 +bServerSku.x86 =CBF3C +lMaxUserSessions.x86 =CBF40 +bAppServerAllowed.x86 =CBF44 +bRemoteConnAllowed.x86=CBF48 +bMultimonAllowed.x86 =CBF4C +ulMaxDebugSessions.x86=CBF50 +bFUSEnabled.x86 =CBF54 + +bServerSku.x64 =F1378 +lMaxUserSessions.x64 =F137C +bAppServerAllowed.x64 =F1380 +bInitialized.x64 =F2430 +bRemoteConnAllowed.x64=F2434 +bMultimonAllowed.x64 =F2438 +ulMaxDebugSessions.x64=F243C +bFUSEnabled.x64 =F2440 + +[10.0.17723.1000-SLInit] +bInitialized.x64 =E9AB0 +bServerSku.x64 =E9AB4 +lMaxUserSessions.x64 =E9AB8 +bAppServerAllowed.x64 =E9AC0 +bRemoteConnAllowed.x64=E9AC4 +bMultimonAllowed.x64 =E9AC8 +ulMaxDebugSessions.x64=E9ACC +bFUSEnabled.x64 =E9AD0 + +[10.0.17763.1-SLInit] +bInitialized.x86 =CD798 +bServerSku.x86 =CD79C +lMaxUserSessions.x86 =CD7A0 +bAppServerAllowed.x86 =CD7A8 +bRemoteConnAllowed.x86=CD7AC +bMultimonAllowed.x86 =CD7B0 +ulMaxDebugSessions.x86=CD7B4 +bFUSEnabled.x86 =CD7B8 + +bInitialized.x64 =ECAB0 +bServerSku.x64 =ECAB4 +lMaxUserSessions.x64 =ECAB8 +bAppServerAllowed.x64 =ECAC0 +bRemoteConnAllowed.x64=ECAC4 +bMultimonAllowed.x64 =ECAC8 +ulMaxDebugSessions.x64=ECACC +bFUSEnabled.x64 =ECAD0 diff --git a/wxid/README.md b/wxid/README.md new file mode 100644 index 0000000..31d437d --- /dev/null +++ b/wxid/README.md @@ -0,0 +1,3 @@ +# 任意微信号 生成微信名片 + + weixin://contacts/profile/ + 微信号 \ No newline at end of file