Samien hace 1 año
commit
462f6ed495
Se han modificado 100 ficheros con 26954 adiciones y 0 borrados
  1. 1 0
      .gitattributes
  2. 38 0
      .github/ISSUE_TEMPLATE/bug_report.md
  3. 20 0
      .github/ISSUE_TEMPLATE/feature_request.md
  4. 22 0
      .github/workflows/toc.yml
  5. 35 0
      .gitignore
  6. 20236 0
      20B_tokenizer.json
  7. 300 0
      README-en.md
  8. 297 0
      README.md
  9. 107 0
      autos/0-write_article.js
  10. 158 0
      autos/0-zsk.js
  11. 82 0
      autos/1-draw_use_SD_api.js
  12. 71 0
      autos/1常用.js
  13. 124 0
      autos/2-闻达笔记.js
  14. 101 0
      autos/QQ.js
  15. 11 0
      autos/alan_s_ui.js
  16. 46 0
      autos/block_programming.js
  17. 13 0
      autos/blue_theme.js
  18. 59 0
      autos/chatglm3_tools_example.js
  19. 58 0
      autos/face-recognition.js
  20. 50 0
      autos/feendback.js
  21. 11 0
      autos/github-discussion.js
  22. 27 0
      autos/memory_improve.js
  23. 12 0
      autos/real_time_upload_st_zsk.js
  24. 76 0
      autos/rtst_kf.js
  25. 72 0
      autos/speech_improve.js
  26. 24 0
      autos/sttq.js
  27. 48 0
      autos/switch_lora.js
  28. 12 0
      autos/wdlw.js
  29. 13 0
      autos/wdsj.js
  30. 12 0
      autos/wdsl.js
  31. 12 0
      autos/wdss.js
  32. 129 0
      autos/ytsb.js
  33. 86 0
      autos/人物卡.js
  34. 53 0
      autos/导入导出聊天记录.js
  35. 46 0
      autos/总结群聊.js
  36. 13 0
      autos/知识库常开.js
  37. 86 0
      autos/自动总结问题.js
  38. 6 0
      cov_ggml_rwkv.bat
  39. 6 0
      cov_torch_rwkv.bat
  40. 1 0
      cuda.bat
  41. 106 0
      docs/api.md
  42. 58 0
      docs/install_fess.md
  43. 5 0
      down_hf.bat
  44. 32 0
      environment.bat
  45. 190 0
      example.config.yml
  46. BIN
      imgs/Word.png
  47. BIN
      imgs/auto1.jpg
  48. BIN
      imgs/auto2.png
  49. BIN
      imgs/auto3.png
  50. BIN
      imgs/auto4.png
  51. BIN
      imgs/replit-code.png
  52. BIN
      imgs/setting.png
  53. BIN
      imgs/setting2.png
  54. BIN
      imgs/webui.jpg
  55. BIN
      imgs/zsk-glm.png
  56. BIN
      imgs/zsk-rwkv.png
  57. BIN
      imgs/zsk-test.png
  58. BIN
      imgs/zsk1.jpg
  59. BIN
      imgs/zsk2.png
  60. 661 0
      licence
  61. 123 0
      llms/YuanAPI.py
  62. 674 0
      llms/gpt4free/LICENSE
  63. 359 0
      llms/gpt4free/README.md
  64. 49 0
      llms/gpt4free/ora/__init__.py
  65. 55 0
      llms/gpt4free/ora/model.py
  66. 39 0
      llms/gpt4free/ora/typing.py
  67. 241 0
      llms/gpt4free/phind/__init__.py
  68. 350 0
      llms/gpt4free/quora/__init__.py
  69. 532 0
      llms/gpt4free/quora/api.py
  70. 21 0
      llms/gpt4free/quora/cookies.txt
  71. 52 0
      llms/gpt4free/quora/graphql/AddHumanMessageMutation.graphql
  72. 17 0
      llms/gpt4free/quora/graphql/AddMessageBreakMutation.graphql
  73. 7 0
      llms/gpt4free/quora/graphql/AutoSubscriptionMutation.graphql
  74. 8 0
      llms/gpt4free/quora/graphql/BioFragment.graphql
  75. 5 0
      llms/gpt4free/quora/graphql/ChatAddedSubscription.graphql
  76. 6 0
      llms/gpt4free/quora/graphql/ChatFragment.graphql
  77. 378 0
      llms/gpt4free/quora/graphql/ChatListPaginationQuery.graphql
  78. 26 0
      llms/gpt4free/quora/graphql/ChatPaginationQuery.graphql
  79. 8 0
      llms/gpt4free/quora/graphql/ChatViewQuery.graphql
  80. 7 0
      llms/gpt4free/quora/graphql/DeleteHumanMessagesMutation.graphql
  81. 7 0
      llms/gpt4free/quora/graphql/DeleteMessageMutation.graphql
  82. 8 0
      llms/gpt4free/quora/graphql/HandleFragment.graphql
  83. 13 0
      llms/gpt4free/quora/graphql/LoginWithVerificationCodeMutation.graphql
  84. 100 0
      llms/gpt4free/quora/graphql/MessageAddedSubscription.graphql
  85. 6 0
      llms/gpt4free/quora/graphql/MessageDeletedSubscription.graphql
  86. 13 0
      llms/gpt4free/quora/graphql/MessageFragment.graphql
  87. 7 0
      llms/gpt4free/quora/graphql/MessageRemoveVoteMutation.graphql
  88. 7 0
      llms/gpt4free/quora/graphql/MessageSetVoteMutation.graphql
  89. 73 0
      llms/gpt4free/quora/graphql/PoeBotCreateMutation.graphql
  90. 24 0
      llms/gpt4free/quora/graphql/PoeBotEditMutation.graphql
  91. 40 0
      llms/gpt4free/quora/graphql/SendMessageMutation.graphql
  92. 12 0
      llms/gpt4free/quora/graphql/SendVerificationCodeForLoginMutation.graphql
  93. 9 0
      llms/gpt4free/quora/graphql/ShareMessagesMutation.graphql
  94. 13 0
      llms/gpt4free/quora/graphql/SignupWithVerificationCodeMutation.graphql
  95. 7 0
      llms/gpt4free/quora/graphql/StaleChatUpdateMutation.graphql
  96. 9 0
      llms/gpt4free/quora/graphql/SubscriptionsMutation.graphql
  97. 3 0
      llms/gpt4free/quora/graphql/SummarizePlainPostQuery.graphql
  98. 3 0
      llms/gpt4free/quora/graphql/SummarizeQuotePostQuery.graphql
  99. 3 0
      llms/gpt4free/quora/graphql/SummarizeSharePostQuery.graphql
  100. 14 0
      llms/gpt4free/quora/graphql/UserSnippetFragment.graphql

+ 1 - 0
.gitattributes

@@ -0,0 +1 @@
+*.bat text eol=crlf

+ 38 - 0
.github/ISSUE_TEMPLATE/bug_report.md

@@ -0,0 +1,38 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Browser [e.g. chrome, safari]
+ - Version [e.g. 22]
+
+**Smartphone (please complete the following information):**
+ - Device: [e.g. iPhone6]
+ - OS: [e.g. iOS8.1]
+ - Browser [e.g. stock browser, safari]
+ - Version [e.g. 22]
+
+**Additional context**
+Add any other context about the problem here.

+ 20 - 0
.github/ISSUE_TEMPLATE/feature_request.md

@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.

+ 22 - 0
.github/workflows/toc.yml

@@ -0,0 +1,22 @@
+on:
+  push:
+    branches: [main]
+    paths: ['README.md']
+  pull_request:
+    branches: [main]
+    paths: ['README.md']
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - uses: actions/checkout@v2
+      - run: |
+          curl https://raw.githubusercontent.com/ekalinin/github-markdown-toc/master/gh-md-toc -o gh-md-toc
+          chmod a+x gh-md-toc
+          ./gh-md-toc --insert --no-backup README.md
+          rm -f ./gh-md-toc
+      - uses: stefanzweifel/git-auto-commit-action@v4
+        with:
+          commit_message: Auto update markdown TOC

+ 35 - 0
.gitignore

@@ -0,0 +1,35 @@
+__pycache__
+model
+vc
+txt2
+testdb.db
+*.baiduyun.uploading.cfg
+*.7z
+vectorstore_path
+*.tmp
+xw
+txt*
+*.001
+*.002
+*.003
+cpu
+新建文件夹
+record.db
+
+#environment
+/venv/
+.venv
+venv.bat
+WPy64-*
+
+*.bin
+vectorstore_path*
+config.yml
+memory
+.history
+*.zip
+autos/inner.js
+llms/convert_rwkv.py
+convert_safetensors.py
+llms/convert_rwkv.py
+

La diferencia del archivo ha sido suprimida porque es demasiado grande
+ 20236 - 0
20B_tokenizer.json


+ 300 - 0
README-en.md

@@ -0,0 +1,300 @@
+# Wendada: A Large-scale Language Model Invocation Platform
+This project aims to achieve efficient content generation for specific contexts while considering the limited computing resources of individuals and small to medium-sized enterprises, as well as concerns about knowledge security and privacy. To achieve this goal, the platform integrates the following capabilities:
+
+1. Knowledge Base: Supports integration with various sources such as [local offline vector stores (rtst mode)](#rtst-mode), [local search engines (Fess mode)](#fess-mode), and online search engines.
+2. Multiple Large Language Models: Currently supports offline deployment models like `chatGLM-6B\chatGLM2-6B`, `chatRWKV`, `llama series (not recommended for Chinese users)`, `moss (not recommended)`, `baichuan (requires lora for optimal results)`, `Aquila-7B`, and `InternLM`, as well as online API access to `openai api` and `chatGLM-130b api`.
+3. Auto Scripts: By developing JavaScript scripts as plugins, the platform is extended to include various automation features, such as customizing dialogue flow, accessing external APIs, and switching LoRA models online.
+4. Other Practical Capabilities: Dialogue history management, intranet deployment, multi-user simultaneous usage, etc.
+
+
+Communication QQ Groups: LLM Usage and General Discussion Group `162451840`; Knowledge Base Usage Discussion Group `241773574 (full, please go to the QQ Channel for discussion)`; Auto Development Discussion Group `744842245`; [QQ Channel](https://pd.qq.com/s/ej03plxks)
+
+<!--ts-->
+- [Wendada: A Large-scale Language Model Invocation Platform](#wendada-a-large-scale-language-model-invocation-platform)
+  - [Installation and Deployment](#installation-and-deployment)
+    - [Description of Various Models](#description-of-various-models)
+    - [Lazy Package](#lazy-package)
+      - [Baidu Cloud](#baidu-cloud)
+      - [Quark](#quark)
+      - [Introduction](#introduction)
+    - [Manual Installation](#manual-installation)
+      - [Step 1: Install Dependencies](#step-1-install-dependencies)
+      - [Step 2: Download Models](#step-2-download-models)
+      - [Step 3: Parameter Configuration](#step-3-parameter-configuration)
+  - [Auto](#auto)
+    - [List of Auto Development Functions](#list-of-auto-development-functions)
+    - [Code Segments Related to Auto Development](#code-segments-related-to-auto-development)
+    - [Partial Built-in Auto Usage Instructions](#partial-built-in-auto-usage-instructions)
+  - [Knowledge Base](#knowledge-base)
+    - [rtst mode](#rtst-mode)
+    - [Using Fine-tuned Models to Improve Knowledge Base Answer Accuracy](#using-fine-tuned-models-to-improve-knowledge-base-answer-accuracy)
+    - [Models](#models)
+    - [Fess Mode](#fess-mode)
+    - [Knowledge Base Debugging](#knowledge-base-debugging)
+    - [Cleaning Knowledge Base Files](#cleaning-knowledge-base-files)
+  - [Model Configuration](#model-configuration)
+    - [chatGLM-6B/chatGLM2-6B](#chatglm-6bchatglm2-6b)
+    - [chatRWKV](#chatrwkv)
+      - [torch](#torch)
+      - [cpp](#cpp)
+    - [Aquila-7B](#aquila-7b)
+- [Secondary Development Based on this Project](#secondary-development-based-on-this-project)
+  - [wenda-webui](#wenda-webui)
+  - [Word Document Software Integration](#word-document-software-integration)
+
+<!-- Created by https://github.com/ekalinin/github-markdown-toc -->
+<!-- Added by: runner, at: Sun May 14 12:45:00 UTC 2023 -->
+
+<!--te-->
+![](imgs/setting.png)
+![](imgs/setting2.png)
+## Installation and Deployment
+### Description of Various Models
+| Feature                                             | Multi-User Support | Stream Output   | CPU            | GPU | Quantization Support               | LoRa Integration |
+| ------------------------------------------------ | ---------- | ---------- | -------------- | --- | ------------------ | -------- |
+| [chatGLM-6B/chatGLM2-6B](#chatglm-6bchatglm2-6b) | √          | √          | Requires Compiler   | √   | Pre-quantization and Online Quantization	 | √        |
+| RWKV [torch](#torch)                             | √          | √          | √              | √   | Pre-quantization and Online Quantization |          |
+| RWKV.[cpp](#cpp)                                 | √          | √          | Instruction Set Acceleration Available |     | Pre-quantization           |          |
+| Baichuan-7B                                      | √          | √          | √              | √   |                    | √        |
+| Baichuan-7B (GPTQ)                               | √          | √          |                | √   | Pre-quantization           |          |
+| [Aquila-7B](#aquila-7b)                          |            | 	Not Implemented	 | √              | √   |                    |          |
+| replit                                           |            |            | √              | √   |                    |          |
+| chatglm130b api                                  | √          |            |                |     |                    |          |
+| openai api                                       | √          | √          |                |     |                    |          |
+| llama.cpp                                        | √          | √          | Instruction Set Acceleration Available |     | Pre-quantization           |          |
+| llama torch                                      | √          | √          | √              | √   | Pre-quantization and Online Quantization |          |
+| InternLM                                         | √          | √          | √              | √   | Online Quantization           |          |
+### Lazy Package
+#### Baidu Cloud
+https://pan.baidu.com/s/1idvot-XhEvLLKCbjDQuhyg?pwd=wdai 
+
+#### Quark
+Link:https://pan.quark.cn/s/c4cb08de666e
+Extract Code:4b4R
+#### Introduction
+Default parameters work well on devices with 6GB VRAM. The latest version of the lazy package has integrated a one-click update feature, which is recommended to be updated before use.
+
+Usage steps (using the glm6b model as an example):
+1. Download the lazy package and model. The model can be downloaded using the built-in script from HuggingFace (HF) or from a cloud drive.
+2. If CUDA 11.8 is not installed, download and install it from the cloud drive.
+3. Double-click on 运行GLM6B.bat (Run GLM6B.bat).
+4. If you need to generate an offline knowledge base, refer to the [Knowledge Base](#knowledge-base) section.
+### Manual Installation
+PS: Be sure to check [example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml), which provides more detailed explanations of each feature!
+#### Step 1: Install Dependencies
+Common dependencies:```pip install -r requirements/requirements.txt```
+Then install additional dependencies according to the [Knowledge Base](#knowledge-base) mode being used.
+
+#### Step 2: Download Models
+Download the corresponding model based on your needs.
+
+Recommended to use the RWKV-4-Raven-7B-v11 model for chatRWKV or chatGLM-6B.
+
+#### Step 3: Parameter Configuration
+Rename[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml) to `config.yml` and fill in the model download location and other necessary information according to the parameter instructions inside the file.
+
+## Auto
+Auto functionality is achieved through JavaScript scripts, which are injected into the program as plugins using Greasemonkey scripts or placed directly in the `autos` directory. This extends Wenda with various automation features.
+
+### List of Auto Development Functions
+| Function (All are asynchronous calls)           | Functionality                                  | Explanation                                                                |
+| ------------------------------- | ------------------------------------- | ------------------------------------------------------------------- |
+| send(s,keyword = "",show=true)  | Send information to LLM and return the model's response as a string | s: Input model text; keyword: Text displayed in the chat interface; show: Whether to display in the chat interface |
+| add_conversation(role, content) | Add conversation information                          | role: 'AI' or 'user'; content: string                                 |
+| save_history()                  | Save conversation history                          | Automatically saves after each conversation, but manually added conversations need to be saved manually                    |
+| find(s, step = 1)               | Search in the knowledge base                          | Returns a JSON array                                                        |
+| find_dynamic(s,step=1,paraJson) | Search in the dynamic knowledge base; see Wendada Notes Auto for reference    | paraJson:{libraryStategy:"sogowx:3",maxItmes:2}                    |
+| zsk(b=true)                     | Toggle knowledge base                            |                                                                     |
+| lsdh(b=true)                    | Toggle history dialogue                          | Knowledge base should be closed when opening                                              |
+| speak(s)                        | Use TTS engine to read text                 | Calls the system's TTS engine                                                        |
+| copy(s)                         | Copy text using `clipboard-write` in the browser   | Requires relevant permissions                                                        |
+### Code Segments Related to Auto Development
+Add content in the left-side feature bar:
+```
+func.push({
+    name: "Name",
+    question: async () => {
+        let answer=await send(app.question)
+        alert(answer)
+    },
+})
+```
+Add content in the lower tab:
+```
+app.plugins.push({ icon: 'note-edit-outline', url: "/static/wdnote/index.html" })
+```
+Search in a specific RTST knowledge base:
+```
+find_in_memory = async (s, step, memory_name) => {
+   response = await fetch("/api/find_rtst_in_memory", {
+      method: 'post',
+      body: JSON.stringify({
+         prompt: s,
+         step: step,
+         memory_name: memory_name
+      }),
+      headers: {
+         'Content-Type': 'application/json'
+      }
+   })
+   let json = await response.json()
+   console.table(json)
+   app.zhishiku = json
+   return json
+}
+```
+Upload to a specific RTST knowledge base:
+```
+upload_rtst_zhishiku = async (title, txt,memory_name) => {
+   response = await fetch("/api/upload_rtst_zhishiku", {
+      method: 'post',
+      body: JSON.stringify({
+         title: title,
+         txt: txt,
+         memory_name: memory_name
+      }),
+      headers: { 'Content-Type': 'application/json' }
+   })
+   alert(await response.text())
+}
+```
+Save a specific RTST knowledge base:
+```
+save_rtst = async (memory_name) => {
+   response = await fetch("/api/save_rtst_zhishiku", {
+      method: 'post',
+      body: JSON.stringify({
+         memory_name: memory_name
+      }),
+      headers: { 'Content-Type': 'application/json' }
+   })
+   alert(await response.text())
+}
+```
+Access SD_agent:
+```
+response = await fetch("/api/sd_agent", {
+   method: 'post',
+   body: JSON.stringify({
+         prompt: `((masterpiece, best quality)), photorealistic,` + Q,
+         steps: 20,
+         // sampler_name: "DPM++ SDE Karras",
+         negative_prompt: `paintings, sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, glans`
+   }),
+   headers: {
+         'Content-Type': 'application/json'
+   }
+})
+try {
+   let json = await response.json()
+   add_conversation("AI", '![](data:image/png;base64,' + json.images[0] + ")")
+} catch (error) {
+   alert("Failed to connect to the SD API. Please make sure the agents library is enabled and set the SD API address to 127.0.0.1:786.")
+}
+```
+### Partial Built-in Auto Usage Instructions
+| File Name               | Functionality                                                                                |
+| -------------------- | ----------------------------------------------------------------------------------- |
+| 0-write_article.js   | Write an article based on a title or outline                                                        |
+| 0-zsk.js             | Enhance and manage the knowledge base                                                                    |
+| face-recognition.js  | 	Pure browser-based face detection: Control voice input by recognizing mouth movements. Only available locally or under TLS due to browser limitations. |
+| QQ.js                | 	Pure browser-based face detection: Control voice input by recognizing mouth movements. Only available locally or under TLS due to browser limitations.                                                    |
+| block_programming.js | easy block programming: Implement simple Auto features by dragging blocks                             |
+| 1-draw_use_SD_api.js | Call the Stable Diffusion interface for drawing using the agents module (see example.config.yml `<Library>`)       |
+
+Call the Stable Diffusion interface for drawing using the agents module (see example.config.yml `<Library>`)
+![](imgs/auto1.jpg)
+![](imgs/auto2.png)
+![](imgs/auto3.png)
+![](imgs/auto4.png)
+
+[Auto examples](https://github.com/l15y/wenda/tree/main/autos)
+
+## Knowledge Base
+The knowledge base works by generating prompt information from search results and inserting it into the dialogue, so that the model becomes aware of the knowledge base data. [rtst mode](#rtst-mode) computes semantic embeddings and matches them against a local database; [Fess mode](#fess-mode) (similar to a local search engine) and bing mode use search engines to obtain answers.
+
+To prevent excessive GPU memory consumption and limitations on model comprehension, the inserted data cannot be too long. Therefore, there are limits on the number of characters and the number of entries. This issue can be resolved by enhancing the knowledge base with Auto.
+
+In normal usage, enabling the knowledge base can be done by checking the "Knowledge Base" option in the upper right corner.
+![](imgs/zsk1.jpg)
+![](imgs/zsk2.png)
+
+
+
+There are several options available:
+1.   rtst mode: Uses sentence_transformers and faiss for indexing and matching, supports both pre-building indexes and building them during runtime. 
+2.   bing mode: Uses cn.bing search, only available in China.
+3.   bingsite mode: Uses cn.bing site search, only available in China.
+4.   fess mode: Requires installing [Fess search](https://github.com/codelibs/fess) locally with the default port. 
+### rtst mode
+Uses sentence_transformers and faiss for indexing and matching, and returns with context. Currently supports txt and pdf formats.
+
+Supports both pre-building indexes and building them during runtime. In it, pre-building the index enforces the use of `cuda`, while runtime building depends on the `rtst` section in the`config.yml`(copy from [example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml))to determine the `device(embedding execution device)`. For users with less than 12G of GPU memory, it is recommended to use `CPU`。
+
+For Windows, pre-building the index is done by running `plugins/buils_rtst_default_index.bat`。
+
+For Linux, run `python plugins/gen_data_st.py` in the wenda environment.
+
+The model needs to be downloaded and placed in the model folder, and the txt format corpus needs to be placed in the txt folder.
+### Using Fine-tuned Models to Improve Knowledge Base Answer Accuracy
+Wenda user "Beifan" has trained and provided weighted combined models and Lora weight files. For detailed information, see https://huggingface.co/fb700/chatglm-fitness-RLHF . By using these models or Lora weight files, significant improvements in summarization capabilities can be achieved compared to models such as chatglm-6b, chatglm2-6b, and Baichuan on the Wenda knowledge base platform.
+### Models
+1. [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese)  Not recommended due to lack of English support and high GPU memory consumption.
+2. [moka-ai/m3e-base](https://huggingface.co/moka-ai/m3e-base) Recommended
+### Fess Mode
+After installing Fess on the local machine using the default port, it can be directly executed. Otherwise, you need to modify the `fess_host` in the `config.yml` (copy from [example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml)) from `127.0.0.1:8080` to the appropriate value. See the [Fess installation guide](docs/install_fess.md).
+
+###  Knowledge Base Debugging
+![](imgs/zsk-test.png)
+![](imgs/zsk-glm.png)
+![](imgs/zsk-rwkv.png)
+
+### Cleaning Knowledge Base Files
+
+Install [utool](https://u.tools/), which is a minimalist, plug-in-based desktop software that can install various plugins developed using Node.js. You can use plugins to clean the Wenda knowledge base data. Please install the following recommended plugins:
+
+- "Disband Folders"(解散文件夹) Plugin: Used to move files from subdirectories to the root directory and delete all subdirectories.
+- "Duplicate File Finder"(重复文件查找) Plugin:Used to delete duplicate files in the directory by comparing their MD5 values.
+- "Batch File Renamer"(文件批量重命名) Plugin:Used for batch renaming of files using regular expressions and partitioning the files based on categories for the knowledge base.
+
+##  Model Configuration
+### chatGLM-6B/chatGLM2-6B
+Run:`run_GLM6B.bat`。
+
+Model location and other parameters: Modify `config.yml`(copy from[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml)).
+
+Default parameters perform well on GTX1660Ti (6GB GPU memory).
+
+### chatRWKV
+Supports both Torch and C++ backends. Run: `run_rwkv.bat`。
+
+Model location and other parameters: See `config.yml`(copy from[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml))。
+#### torch
+You can use the built-in script to quantize the model by running: `cov_torch_rwkv.bat`.
+
+After installing vc, you can enable CUDA acceleration with one click by running: `run_rwkv_with_vc.bat`. Strongly recommended to install!
+#### cpp
+You can use the built-in script to convert and quantize the torch version of the model by running: `cov_ggml_rwkv.bat`。
+
+Set the strategy to "Q8_0->8" to support quantization running on the CPU, which is slower and suitable for users without NVIDIA GPUs or no GPUs.
+
+Note: The default Windows version file is AVX2, and the default Linux version file is compiled on Debian Sid. Compatibility with other Linux distributions is unknown.
+
+You can check:[saharNooby/rwkv.cpp](https://github.com/saharNooby/rwkv.cpp)for other versions or compile it yourself.
+
+
+
+### Aquila-7B
+1. Run `pip install FlagAI`. Note that FlagAI depends on many old versions of packages and needs to be compiled manually. So if you want to run it based on python3.11 or want to run other models in the same environment, it is recommended to download the lazy package.
+2. Run`run_Aquila.bat`。
+
+Model location and other parameters: modify `config.yml` (copy from [example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml)). Please note that the model needs to be downloaded from here: https://model.baai.ac.cn/model-detail/100101
+
+# Secondary Development Based on this Project
+## [wenda-webui](https://github.com/AlanLee1996/wenda-webui)
+This project calls Wenda's API interface to implement functionality similar to new bing. Technology stack: vue3 + element-plus + ts
+![](imgs/webui.jpg)
+## [Word Document Software Integration](https://qun.qq.com/qqweb/qunpro/share?_wv=3&_wwv=128&appChannel=share&inviteCode=20s7Vs0iZMx&contentID=1mlnYv&businessType=2&from=181174&shareSource=5&biz=ka)
+Call Wenda's HTTP API through macros.
+![](imgs/Word.png)
+[![Star History Chart](https://api.star-history.com/svg?repos=l15y/wenda&type=Date)](https://star-history.com/#l15y/wenda&Date)

+ 297 - 0
README.md

@@ -0,0 +1,297 @@
+# 闻达:一个大规模语言模型调用平台
+本项目设计目标为实现针对特定环境的高效内容生成,同时考虑个人和中小企业的计算资源局限性,以及知识安全和私密性问题。为达目标,平台化集成了以下能力:
+
+1. 知识库:支持对接[本地离线向量库](#rtst模式)、[本地搜索引擎](#fess模式)、在线搜索引擎等。
+2. 多种大语言模型:目前支持离线部署模型有`chatGLM-6B\chatGLM2-6B`、`chatRWKV`、`llama系列(不推荐中文用户)`、`moss(不推荐)`、`baichuan(需配合lora使用,否则效果差)`、`Aquila-7B`、`InternLM`,在线API访问`openai api`和`chatGLM-130b api`。
+3. Auto脚本:通过开发插件形式的JavaScript脚本,为平台附加功能,实现包括但不限于自定义对话流程、访问外部API、在线切换LoRA模型。
+4. 其他实用化所需能力:对话历史管理、内网部署、多用户同时使用等。
+
+
+交流QQ群:LLM使用和综合讨论群`162451840`;知识库使用讨论群`241773574(已满,请去QQ频道讨论)`;Auto开发交流群`744842245`;[QQ频道](https://pd.qq.com/s/ej03plxks)
+
+<!--ts-->
+- [闻达:一个大规模语言模型调用平台](#闻达一个大规模语言模型调用平台)
+  - [安装部署](#安装部署)
+    - [各模型功能说明](#各模型功能说明)
+    - [懒人包](#懒人包)
+      - [百度云](#百度云)
+      - [夸克](#夸克)
+      - [介绍](#介绍)
+    - [自行安装](#自行安装)
+      - [1.安装库](#1安装库)
+      - [2.下载模型](#2下载模型)
+      - [3.参数设置](#3参数设置)
+  - [Auto](#auto)
+    - [Auto 开发函数列表](#auto-开发函数列表)
+    - [Auto 开发涉及代码段](#auto-开发涉及代码段)
+    - [部分内置 Auto 使用说明](#部分内置-auto-使用说明)
+  - [知识库](#知识库)
+    - [rtst模式](#rtst模式)
+    - [使用微调模型提高知识库回答准确性](#使用微调模型提高知识库回答准确性)
+    - [模型](#模型)
+    - [fess模式](#fess模式)
+    - [知识库调试](#知识库调试)
+    - [清洗知识库文件](#清洗知识库文件)
+  - [模型配置](#模型配置)
+    - [chatGLM-6B/chatGLM2-6B](#chatglm-6bchatglm2-6b)
+    - [chatRWKV](#chatrwkv)
+      - [torch](#torch)
+      - [cpp](#cpp)
+    - [Aquila-7B](#aquila-7b)
+- [基于本项目的二次开发](#基于本项目的二次开发)
+  - [wenda-webui](#wenda-webui)
+  - [接入Word文档软件](#接入word文档软件)
+
+<!-- Created by https://github.com/ekalinin/github-markdown-toc -->
+<!-- Added by: runner, at: Sun May 14 12:45:00 UTC 2023 -->
+
+<!--te-->
+![](imgs/setting.png)
+![](imgs/setting2.png)
+## 安装部署
+### 各模型功能说明
+| 功能                                             | 多用户并行 | 流式输出   | CPU            | GPU | 量化               | 外挂LoRa |
+| ------------------------------------------------ | ---------- | ---------- | -------------- | --- | ------------------ | -------- |
+| [chatGLM-6B/chatGLM2-6B](#chatglm-6bchatglm2-6b) | √          | √          | 需安装编译器   | √   | 预先量化和在线量化 | √        |
+| RWKV [torch](#torch)                             | √          | √          | √              | √   | 预先量化和在线量化 |          |
+| RWKV.[cpp](#cpp)                                 | √          | √          | 可用指令集加速 |     | 预先量化           |          |
+| Baichuan-7B                                      | √          | √          | √              | √   |                    | √        |
+| Baichuan-7B (GPTQ)                               | √          | √          |                | √   | 预先量化           |          |
+| [Aquila-7B](#aquila-7b)                          |            | 官方未实现 | √              | √   |                    |          |
+| replit                                           |            |            | √              | √   |                    |          |
+| chatglm130b api                                  | √          |            |                |     |                    |          |
+| openai api                                       | √          | √          |                |     |                    |          |
+| llama.cpp                                        | √          | √          | 可用指令集加速 |     | 预先量化           |          |
+| llama torch                                      | √          | √          | √              | √   | 预先量化和在线量化 |          |
+| InternLM                                         | √          | √          | √              | √   | 在线量化           |          |
+### 懒人包
+#### 百度云
+https://pan.baidu.com/s/1idvot-XhEvLLKCbjDQuhyg?pwd=wdai 
+
+#### 夸克
+链接:https://pan.quark.cn/s/c4cb08de666e
+提取码:4b4R
+#### 介绍
+默认参数在6G显存设备上运行良好。最新版懒人版已集成一键更新功能,建议使用前更新。
+
+使用步骤(以glm6b模型为例):
+1. 下载懒人版主体和模型,模型可以用内置脚本从HF下载,也可以从网盘下载。
+2. 如果没有安装`CUDA11.8`,从网盘下载并安装。
+3. 双击运行`运行GLM6B.bat`。
+4. 如果需要生成离线知识库,参考 [知识库](#知识库)。
+### 自行安装
+PS:一定要看[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml),里面对各功能有更详细的说明!!!
+#### 1.安装库
+通用依赖:```pip install -r requirements/requirements.txt```
+根据使用的 [知识库](#知识库)进行相应配置
+
+#### 2.下载模型
+根据需要,下载对应模型。
+
+建议使用chatRWKV的RWKV-4-Raven-7B-v11,或chatGLM-6B。
+
+#### 3.参数设置
+把[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml)重命名为`config.yml`,根据里面的参数说明,填写你的模型下载位置等信息
+
+## Auto
+auto功能通过JavaScript脚本实现,使用油猴脚本或直接放到`autos`目录的方式注入至程序,为闻达附加各种自动化功能。
+
+### Auto 开发函数列表
+| 函数 (皆为异步调用)           | 功能                                  | 说明                                                                |
+| ------------------------------- | ------------------------------------- | ------------------------------------------------------------------- |
+| send(s,keyword = "",show=true)  | 发送信息至LLM,返回字符串为模型返回值 | s:输入模型文本;keyword:聊天界面显示文本;show:是否在聊天界面显示 |
+| add_conversation(role, content) | 添加会话信息                          | role:'AI'、'user';content:字符串                                 |
+| save_history()                  | 保存会话历史                          | 对话完成后会自动保存,但手动添加的对话须手动保存                    |
+| find(s, step = 1)               | 从知识库查找                          | 返回json数组                                                        |
+| find_dynamic(s,step=1,paraJson) | 从动态知识库查找;参考闻达笔记Auto    | paraJson:{libraryStategy:"sogowx:3",maxItmes:2}                    |
+| zsk(b=true)                     | 开关知识库                            |                                                                     |
+| lsdh(b=true)                    | 开关历史对话                          | 打开知识库时应关闭历史                                              |
+| speak(s)                        | 使用TTS引擎朗读文本。                 | 调用系统引擎                                                        |
+| copy(s)                         | 使用浏览器`clipboard-write`复制文本   | 需要相关权限                                                        |
+### Auto 开发涉及代码段
+在左侧功能栏添加内容:
+```
+func.push({
+    name: "名称",
+    question: async () => {
+        let answer=await send(app.question)
+        alert(answer)
+    },
+})
+```
+在下方选项卡添加内容:
+```
+app.plugins.push({ icon: 'note-edit-outline', url: "/static/wdnote/index.html" })
+```
+在指定RTST知识库查找:
+```
+find_in_memory = async (s, step, memory_name) => {
+   response = await fetch("/api/find_rtst_in_memory", {
+      method: 'post',
+      body: JSON.stringify({
+         prompt: s,
+         step: step,
+         memory_name: memory_name
+      }),
+      headers: {
+         'Content-Type': 'application/json'
+      }
+   })
+   let json = await response.json()
+   console.table(json)
+   app.zhishiku = json
+   return json
+}
+```
+上传至指定RTST知识库:
+```
+upload_rtst_zhishiku = async (title, txt,memory_name) => {
+   response = await fetch("/api/upload_rtst_zhishiku", {
+      method: 'post',
+      body: JSON.stringify({
+         title: title,
+         txt: txt,
+         memory_name: memory_name
+      }),
+      headers: { 'Content-Type': 'application/json' }
+   })
+   alert(await response.text())
+}
+```
+保存指定RTST知识库:
+```
+save_rtst = async (memory_name) => {
+   response = await fetch("/api/save_rtst_zhishiku", {
+      method: 'post',
+      body: JSON.stringify({
+         memory_name: memory_name
+      }),
+      headers: { 'Content-Type': 'application/json' }
+   })
+   alert(await response.text())
+}
+```
+访问SD_agent:
+```
+response = await fetch("/api/sd_agent", {
+   method: 'post',
+   body: JSON.stringify({
+         prompt: `((masterpiece, best quality)), photorealistic,` + Q,
+         steps: 20,
+         // sampler_name: "DPM++ SDE Karras",
+         negative_prompt: `paintings, sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, glans`
+   }),
+   headers: {
+         'Content-Type': 'application/json'
+   }
+})
+try {
+   let json = await response.json()
+   add_conversation("AI", '![](data:image/png;base64,' + json.images[0] + ")")
+} catch (error) {
+   alert("连接SD API失败,请确认已开启agents库,并将SD API地址设置为127.0.0.1:786")
+}
+```
+### 部分内置 Auto 使用说明
+| 文件名               | 功能                                                                                |
+| -------------------- | ----------------------------------------------------------------------------------- |
+| 0-write_article.js   | 写论文:根据题目或提纲写论文                                                        |
+| 0-zsk.js             | 知识库增强和管理                                                                    |
+| face-recognition.js  | 纯浏览器端人脸检测:通过识别嘴巴开合,控制语音输入。因浏览器限制,仅本地或TLS下可用 |
+| QQ.js                | QQ机器人:配置过程见文件开头注释                                                     |
+| block_programming.js | 猫猫也会的图块化编程:通过拖动图块实现简单Auto功能                                   |
+| 1-draw_use_SD_api.js | 通过agents模块(见example.config.yml`<Library>`)调用Stable Diffusion接口绘图       |
+
+以上功能主要用于展示auto用法,进一步能力有待广大用户进一步发掘。
+![](imgs/auto1.jpg)
+![](imgs/auto2.png)
+![](imgs/auto3.png)
+![](imgs/auto4.png)
+
+[auto例程](https://github.com/l15y/wenda/tree/main/autos)
+
+## 知识库
+知识库原理是在搜索后,生成一些提示信息插入到对话里面,知识库的数据就被模型知道了。[rtst模式](#rtst模式)计算语义并在本地数据库中匹配;[fess模式](#fess模式)(相当于本地搜索引擎)、bing模式均调用搜索引擎搜索获取答案。
+
+为防止爆显存和受限于模型理解能力,插入的数据不能太长,所以有字数和条数限制,这一问题可通过知识库增强Auto解决。
+
+正常使用中,勾选右上角知识库即开启知识库。
+![](imgs/zsk1.jpg)
+![](imgs/zsk2.png)
+
+
+
+有以下几种方案:
+1.   rtst模式,sentence_transformers+faiss进行索引,支持预先构建索引和运行中构建。
+2.   bing模式,cn.bing搜索,仅国内可用
+3.   bingsite模式,cn.bing站内搜索,仅国内可用
+4.   fess模式,本地部署的[fess搜索](https://github.com/codelibs/fess),并进行关键词提取
+### rtst模式
+sentence_transformers+faiss进行索引、匹配,并连同上下文返回。目前支持txt和pdf格式。
+
+支持预先构建索引和运行中构建,其中,预先构建索引强制使用`cuda`,运行中构建根据`config.yml`(复制[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml))中`rtst`段的`device(embedding运行设备)`决定,对于显存小于12G的用户建议使用`CPU`。
+
+Windows预先构建索引运行:`plugins/buils_rtst_default_index.bat`。
+
+Linux直接使用wenda环境执行 `python plugins/gen_data_st.py`
+
+需下载模型置于model文件夹,并将txt格式语料置于txt文件夹。
+### 使用微调模型提高知识库回答准确性
+闻达用户“帛凡”,训练并提供的权重合并模型和lora权重文件,详细信息见https://huggingface.co/fb700/chatglm-fitness-RLHF ,使用该模型或者lora权重文件,对比chatglm-6b、chatglm2-6b、百川等模型,在闻达知识库平台中,总结能力可获得显著提升。
+### 模型
+1. [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese) 不再推荐,不支持英文且显存占用高
+2. [moka-ai/m3e-base](https://huggingface.co/moka-ai/m3e-base) 推荐
+### fess模式
+在本机使用默认端口安装fess后可直接运行。否则需修改`config.yml`(复制[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml))中`fess_host`的`127.0.0.1:8080`为相应值。[FESS安装教程](docs/install_fess.md)
+###  知识库调试
+![](imgs/zsk-test.png)
+![](imgs/zsk-glm.png)
+![](imgs/zsk-rwkv.png)
+
+### 清洗知识库文件
+
+安装 [uTools](https://u.tools/) 工具,uTools 是一个极简、插件化的桌面软件,可以安装各种使用 nodejs 开发的插件。您可以使用插件对闻达的知识库进行数据清洗。请自行安装以下推荐插件:
+
+- 插件“解散文件夹”,用于将子目录的文件移动到根目录,并删除所有子目录。
+- 插件“重复文件查找”,用于删除目录中的重复文件,原理是对比文件 md5。
+- 插件“文件批量重命名”,用于使用正则匹配和修改文件名,并将分类后的文件名进行知识库的分区操作。
+
+##  模型配置
+### chatGLM-6B/chatGLM2-6B
+运行:`run_GLM6B.bat`。
+
+模型位置等参数:修改`config.yml`(复制[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml))。
+
+默认参数在GTX1660Ti(6G显存)上运行良好。
+
+### chatRWKV
+支持torch和cpp两种后端实现,运行:`run_rwkv.bat`。
+
+模型位置等参数:见`config.yml`(复制[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml))。
+#### torch
+可使用内置脚本对模型量化,运行:`cov_torch_rwkv.bat`。此操作可以加快启动速度。
+
+在安装vc后支持一键启动CUDA加速,运行:`run_rwkv_with_vc.bat`。强烈建议安装!!!
+#### cpp
+可使用内置脚本对torch版模型转换和量化。 运行:`cov_ggml_rwkv.bat`。
+
+设置strategy诸如"Q8_0->8"即支持量化在cpu运行,速度较慢,没有显卡或者没有nvidia显卡的用户使用。
+
+注意:默认windows版本文件为AVX2,默认Linux版本文件是在debian sid编译的,其他linux发行版本未知。
+
+可以查看:[saharNooby/rwkv.cpp](https://github.com/saharNooby/rwkv.cpp),下载其他版本,或者自行编译。
+
+### Aquila-7B
+1. 运行`pip install FlagAI`。注意FlagAI依赖很多旧版本的包,需要自己编译,所以如果想基于python3.11运行或者想在一个环境同时跑其他模型,建议去下懒人包
+2. 运行:`run_Aquila.bat`。
+
+模型位置等参数:见`config.yml`(复制[example.config.yml](https://github.com/l15y/wenda/blob/main/example.config.yml))。注意模型要在这里下:https://model.baai.ac.cn/model-detail/100101
+
+# 基于本项目的二次开发
+## [wenda-webui](https://github.com/AlanLee1996/wenda-webui)
+项目调用闻达的 api 接口实现类似于 new bing 的功能。 技术栈:vue3 + element-plus + ts
+![](imgs/webui.jpg)
+## [接入Word文档软件](https://qun.qq.com/qqweb/qunpro/share?_wv=3&_wwv=128&appChannel=share&inviteCode=20s7Vs0iZMx&contentID=1mlnYv&businessType=2&from=181174&shareSource=5&biz=ka)
+通过宏,调用闻达HTTP API
+![](imgs/Word.png)
+[![Star History Chart](https://api.star-history.com/svg?repos=l15y/wenda&type=Date)](https://star-history.com/#l15y/wenda&Date)

+ 107 - 0
autos/0-write_article.js

@@ -0,0 +1,107 @@
+// ==UserScript==
+// @name         写论文
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  根据题目或提纲写论文
+// @author       You
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+
+let RomanNumeralsMap = {
+    'III': 3,
+    'II': 2,
+    'IV': 4,
+    'IX': 9,
+    'XL': 40,
+    'XC': 90,
+    'CD': 400,
+    'CM': 900,
+    'I': 1,
+    'V': 5,
+    'X': 10,
+    'L': 50,
+    'C': 100,
+    'D': 500,
+    'M': 1000
+}
+
+function find_RomanNumerals(str) {
+    let number = 0;
+    for (var p in RomanNumeralsMap) {
+        if (str.indexOf(p) != -1) {
+            str = str.split(p).join("");
+            number += RomanNumeralsMap[p];
+        }
+    }
+    return number
+}
+
+func.push({
+    name: "根据标题写论文",
+    description: "根据主题撰写内容翔实、有信服力的论文",
+    question: async () => {
+        lsdh(false)
+        Q = app.question
+        app.max_length = 4096
+        app.chat = []
+        resp = (await send("根据以下主题,写一篇高度凝练且全面的论文提纲:" + Q, Q))
+            .replace(/\n- /g, '\n1.')//兼容不同格式
+            .split("\n")
+        content = [resp.join("\n\n"), "------------------------------正文------------------------------"]
+        for (let i in resp) {
+            let line = resp[i]
+            if (line == "") continue
+            line = line.split(".")
+            if (line.length < 2) {
+                continue  // 判断非提纲内容
+            }
+            content.push(resp[i])   // 保存提纲
+            let num = find_RomanNumerals(line[0])
+            if (num <= 0 || num == 100) {
+                content.push(await send("根据主题:" + Q +
+                    "\n对下列段落进行详细的撰写:" + line[1], line[1]) + "\n\n")
+            }
+        }
+        content = content.join("\n\n")
+        add_conversation("user",  Q )
+        add_conversation("AI",  content )
+        console.log(content)
+
+        copy(content)
+
+    },
+})
+func.push({
+    name: "根据提纲写论文",
+    description: "根据主题撰写内容翔实、有信服力的论文",
+    question: async () => {
+        title = app.question
+        app.max_length = 4096
+        app.chat = []
+        resp =title.split("\n")
+        title=resp[0]
+        content = [resp.join("\n\n"), "------------------------------正文------------------------------"]
+        for (let i in resp) {
+            let line = resp[i]
+            if (line == "") continue
+            line = line.split(".")
+            if (line.length < 2) {
+                continue  // 判断非提纲内容
+            }
+            content.push(resp[i])   // 保存提纲
+            let num = find_RomanNumerals(line[0])
+            if (num <= 0 || num == 100) {
+                content.push(await send("根据主题:" + title +
+                    "。对下列段落进行详细的撰写:" + line[1], line[1]))
+            }
+        }
+        content = content.join("\n\n")
+        add_conversation("user",  title )
+        add_conversation("AI",  content )
+        console.log(content)
+
+        copy(content)
+    },
+})

+ 158 - 0
autos/0-zsk.js

@@ -0,0 +1,158 @@
+// ==UserScript==
+// @name         知识库
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  利用知识库回答问题
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+get_title_form_md = (s) => {
+    console.log(s)
+    try {
+        return s.match('\\[(.+)\\]')[1]
+    } catch {
+        return s
+    }
+}
+get_url_form_md = (s) => {
+    console.log(s)
+    try {
+        return s.match('\\((.+)\\)')[1]
+    } catch {
+        return s
+    }
+}
+window.answer_with_zsk = async (Q) => {
+    // lsdh(false)
+    app.top_p = 0.2
+    app.chat.push({ "role": "user", "content": Q })
+    kownladge = (await find(Q, 5)).filter(i => !i.score || i.score < 120).map(i => ({
+        title: get_title_form_md(i.title),
+        url: get_url_form_md(i.title),
+        content: i.content
+    }))
+    if (kownladge.length > 0) {
+        answer = {
+            role: "AI",
+            content: "",
+            sources: kownladge
+        }
+        app.chat.push(answer)
+        result = []
+        for (let i in kownladge) {
+            answer.content = '正在查找:' + kownladge[i].title
+            if (i > 3) continue
+            let prompt = app.zsk_summarize_prompt + '\n' +
+                kownladge[i].content + "\n问题:" + Q
+            result.push(await send(prompt, keyword = Q, show = false))
+        }
+        app.chat.pop()
+        app.chat.pop()
+        let prompt = app.zsk_answer_prompt + '\n' +
+            result.join('\n') + "\n问题:" + Q
+        return await send(prompt, keyword = Q, show = true, sources = kownladge)
+    } else {
+        app.chat.pop()
+        sources = [{
+            title: '未匹配到知识库',
+            content: '本次对话内容完全由模型提供'
+        }]
+        return await send(Q, keyword = Q, show = true, sources = sources)
+    }
+}
+func.push({
+    name: "知识库",
+    description: "通过知识库回答问题",
+    question: async (Q) => {
+        answer_with_zsk(Q)
+    }
+})
+window.answer_with_fast_zsk = async (Q) => {
+    // lsdh(false)
+    app.top_p = 0.2
+    kownladge = (await find(Q, app.zsk_step)).filter(i => !i.score || i.score < 120).map(i => ({
+        title: get_title_form_md(i.title),
+        url: get_url_form_md(i.title),
+        content: i.content
+    }))
+    if (kownladge.length > 0) {
+        if (app.llm_type == "rwkv") {
+            let prompt = 'raw!Instruction: 深刻理解下面提供的信息,根据信息完成问答。\n\nInput: ' +
+                kownladge.map((e, i) => i + 1 + "." + e.content).join('\n') + "\n\nResponse: Question: " + Q + "\nAnswer: "
+            return await send(prompt, keyword = Q, show = true, sources = kownladge,
+                addition_args = { cfg_factor: app.cfg_factor, cfg_ctx: Q })
+        } else {
+
+            let prompt = app.zsk_answer_prompt + '\n' +
+                kownladge.map((e, i) => i + 1 + "." + e.content).join('\n') + "\n问题:" + Q
+            return await send(prompt, keyword = Q, show = true, sources = kownladge)
+        }
+    } else {
+        app.chat.pop()
+        sources = [{
+            title: '未匹配到知识库',
+            content: '本次对话内容完全由模型提供'
+        }]
+        return await send(Q, keyword = Q, show = true, sources = sources)
+    }
+}
+func.push({
+    name: "快速知识库",
+    question: window.answer_with_fast_zsk
+}
+)
+func.push({
+    name: "sgwx知识库全文爬取",
+    question: async () => {
+        let Q = app.question
+
+        lsdh(true)//打开历史对话
+        lsdh(false)
+        app.chat.push({ "role": "user", "content": Q })
+        kownladge = await find(Q, 2)
+        app.chat.push({
+            "role": "AI", "content": "识别结果:\n|标题|内容|\n|--|--|\n" +
+                kownladge.map(i => "|" + i.title + "|" + i.content.replace(/\n/g, ' ') + "|").join("\n")
+        })
+        result = []
+        for (let i in kownladge) {
+            wx_response = await fetch("/api/read_sgwx", {
+                method: 'post',
+                body: JSON.stringify({
+                    url: kownladge[i].title.match(/\((.+)\)/)[1],
+                }),
+                headers: {
+                    'Content-Type': 'application/json'
+                }
+            })
+
+            let prompt = "精炼地总结以下文段中与问题相关的信息为二十个字。\n" + await wx_response.text() + "\n问题:" + Q
+            result.push(await send(prompt))
+        }
+        let prompt = "根据以下资料,用中文回答问题。\n" +
+            result.join('\n') + "\n问题:" + Q
+        await send(prompt)
+
+    },
+})
+// func.push({
+//     name: "知识库step",
+//     question: async () => {
+//         let Q = app.question
+//         app.chat.push({ "role": "user", "content": "步数为0" })
+//         kownladge = await find(Q, 0)
+//         kownladge=kownladge.map(i => i.content).join('\n\n').replace(/'/g,"")
+//         app.chat.push({ "role": "AI", "content": kownladge })
+//         app.chat.push({ "role": "user", "content": "步数为1" })
+//         kownladge = await find(Q, 1)
+//         kownladge=kownladge.map(i => i.content).join('\n\n').replace(/'/g,"")
+//         app.chat.push({ "role": "AI", "content": kownladge })
+//         app.chat.push({ "role": "user", "content": "步数为2" })
+//         kownladge = await find(Q, 2)
+//         kownladge=kownladge.map(i => i.content).join('\n\n').replace(/'/g,"")
+//         app.chat.push({ "role": "AI", "content": kownladge })
+//     },
+// })

+ 82 - 0
autos/1-draw_use_SD_api.js

@@ -0,0 +1,82 @@
+// ==UserScript==
+// @name         画图
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  调用sd api画图
+// @author       You
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+
+
+func.push({
+    name: "中文绘图",
+    question: async () => {
+        lsdh(false)
+        zsk(false)
+
+        add_conversation("user", app.question)
+        Q = await send("使用英语简要描述以下场景:" + app.question, app.question, false)
+        app.loading = true
+        alert("提示词:" + Q)
+        response = await fetch("/api/sd_agent", {
+            // signal: signal,
+            method: 'post',
+            body: JSON.stringify({
+                prompt: `((masterpiece, best quality)), photorealistic,` + Q,
+                steps: 20,
+                // "enable_hr": false,
+                // "hr_scale": 1,
+                // "hr_upscaler": "ESRGAN_4x",
+                // "hr_second_pass_steps": 15,
+                // "denoising_strength": 0.6,
+                "sampler_name": "DPM++ SDE Karras",
+                negative_prompt: `paintings, sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, glans`
+            }),
+            headers: {
+                'Content-Type': 'application/json'
+            }
+        })
+        try {
+            let json = await response.json()
+            add_conversation("AI", '![](data:image/png;base64,' + json.images[0] + ")", no_history = true)
+        } catch (error) {
+            alert("连接SD API失败,请确认已开启agents库,并将SD API地址设置为127.0.0.1:786")
+        }
+        app.loading = false
+        save_history()
+    },
+})
+func.push({
+    name: "draw use SD",
+    question: async () => {
+        lsdh(false)
+        zsk(false)
+
+        add_conversation("user", app.question)
+        Q = app.question
+        app.loading = true
+        response = await fetch("/api/sd_agent", {
+            // signal: signal,
+            method: 'post',
+            body: JSON.stringify({
+                prompt: `best quality, hyper realism, (ultra high resolution), masterpiece, 8K, RAW Photo, ` + Q,
+                steps: 20,
+                // sampler_name: "DPM++ SDE Karras",
+                negative_prompt: `paintings, sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, glans`
+            }),
+            headers: {
+                'Content-Type': 'application/json'
+            }
+        })
+        try {
+            let json = await response.json()
+            add_conversation("AI", '![](data:image/png;base64,' + json.images[0] + ")")
+        } catch (error) {
+            alert("连接SD API失败,请确认已开启agents库,并将SD API地址设置为127.0.0.1:786")
+        }
+        app.loading = false
+        save_history()
+    },
+})

+ 71 - 0
autos/1常用.js

@@ -0,0 +1,71 @@
+// ==UserScript==
+// @name         常用prompt
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  内置的常用prompt
+// @author       FIGHTZERO
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+
+app.func_menu = func = func.concat([
+    {
+        name: "材料改写",
+        description: "对指定内容进行多个版本的改写,以避免文本重复",
+        question:
+            "用中文改写以下段落,可以提到相同或类似的内容,但不必重复使用。可以使用一些修辞手法来增强文本的美感,例如比喻、拟人、排比等。可以添加更多的细节来丰富文本的内容和形象,例如描述人物、场景、事件等。可以通过逻辑推导来得出结论或观点,例如通过推理、分析、比较等方式。可以无中生有地提到一些内容,以增加细节和丰富性,例如通过虚构、猜测等方式。在修改段落时,需要确保文本的含义不发生变化,可以重新排列句子、改变表达方式。"
+
+        ,
+    },
+    {
+        name: "翻译",
+        description: "",
+        question: "翻译成中文:",
+    },
+    {
+        name: "语音输入优化",
+        description: "处理用第三方应用语音转换的文字,精简口头禅和语气词。",
+        question: "请用简洁明了的语言,编辑以下段落,以改善其逻辑流程,消除印刷错误,并以中文作答。请务必保持文章的原意。请从编辑以下文字开始:",
+    },
+    {
+        name: "摘要生成",
+        description: "根据内容,提取要点并适当扩充",
+        question: "使用下面提供的文本作为基础,生成一个简洁的中文摘要,突出最重要的内容,并提供对决策有用的分析。",
+    },
+    {
+        name: "问题生成",
+        description: "基于内容生成常见问答",
+        question: "根据以下内容,生成一个 10 个常见问题的清单:",
+    },
+    {
+        name: "提问助手",
+        description: "多角度提问,触发深度思考",
+        question: "针对以下内容,提出疑虑和可能出现的问题,用来促进更完整的思考:",
+    },
+    {
+        name: "评论助手",
+        description: "",
+        question: "针对以下内容,进行一段有评论,可以包括对作者的感谢,提出可能出现的问题等:",
+    },
+    {
+        name: "意见回答",
+        description: "为意见答复提供模板",
+        question: "你是一个回复基层意见的助手,你会针对一段内容拟制回复,回复中应充分分析可能造成的后果,并从促进的单位建设的角度回答。回应以下内容:",
+    },
+    {
+        name: "写提纲",
+        description: "",
+        question: "你是一个擅长思考的助手,你会把一个主题拆解成相关的多个子主题。请你使用中文,针对下列主题,提供相关的子主题。直接输出结果,不需要额外的声明:",
+    },
+    {
+        name: "内容总结",
+        description: "将文本内容总结为 100 字。",
+        question: "将以下文字概括为 100 个字,使其易于阅读和理解。避免使用复杂的句子结构或技术术语。",
+    },
+    {
+        name: "写新闻",
+        description: "根据主题撰写新闻",
+        question: "使用清晰、简洁、易读的语言写一篇新闻,主题为",
+    },
+])

+ 124 - 0
autos/2-闻达笔记.js

@@ -0,0 +1,124 @@
+app.plugins.push({ icon: 'note-edit-outline', url: "/static/wdnote/index.html" })
+
+
+func.push({
+    name: "闻达笔记",
+    question: async () => {
+        let Q = app.question
+        zsk(false)
+        lsdh(false)
+        app.chat.push({ "role": "user", "content": "以下文段是我准备写笔记的相关素材和观点,请结合如下内容写一篇笔记。\n文段内容:\n" + Q })
+        kownladge = await find_dynamic(Q, 3, { 'libraryStategy': "sogowx:3", 'maxItmes': 2 })
+
+        result = []
+
+        if (kownladge.length == 0) {
+            app.chat.push({ "role": "AI", "content": "闻达笔记分析:您输入的信息似乎和笔记没有太大关系,我是您的闻达笔记AI助理,主要协助您编写笔记,如果有其他问题,可以咨询我的AI小伙伴们。\n" })
+        } else {
+            app.chat.push({
+                "role": "AI", "content": "闻达笔记分析:\n|标题|内容|\n|--|--|\n" +
+                    kownladge.map(i => "|" + i.title + "|" + i.content.replace(/\n/g,' ') + "|").join("\n")
+            })
+        }
+
+        hascontent = ''
+
+        for (let i in kownladge) {
+            if (i > 3) continue //超过三个知识点,直接停
+            if (kownladge[i].score > 300) continue //分值越大,信息偏差就越大,为了精确匹配,这里设置为100为阈值
+            let prompt = "简要总结下面文段内容:\n" + kownladge[i].content
+            // kownladge[i].content + "\n问题:" + Q
+            if (kownladge[i].content != '') {
+                result.push(await send(prompt))
+                hascontent = hascontent + kownladge[i].content
+            }
+        }
+
+        if (hascontent != '') {
+            prompt = "学习以下文段,用中文给出总结。文段内容:\n" + result.join('\n')
+            writeNote(await send(prompt))
+        } else {
+            app.chat.push({ "role": "AI", "content": "非常抱歉,我没有找到相关知识,请再给我一些新的提示。" })
+        }
+
+          },
+})
+
+writeNote = (s) => {
+    // 非空验证
+    // if ($('#todo').val() == '') {
+    //     return
+    // }
+    //获取时间
+    var time = w_nowTime();
+    //todoList
+    var todoList = [];
+    // 先获取下本地是否存有
+    var historyTodoList = JSON.parse(localStorage.getItem("todoList"));
+    count = Object.keys(historyTodoList).length;
+    if (historyTodoList) {
+        //本地有
+        var todo = {};
+        todo.things = s;
+        todo.time = time;
+        todo.id = count;
+        historyTodoList.push(todo);
+        localStorage.setItem('todoList', JSON.stringify(historyTodoList));
+        count++;
+    } else {
+        //本地無
+        var todo = {};
+        todo.things = s;
+        todo.time = time;
+        todo.id = count;
+        todoList.push(todo);
+        localStorage.setItem('todoList', JSON.stringify(todoList));
+        count++;
+    }
+    //存储完成后清空输入框
+    // $('#todo').val('');
+    // 显示在任务列表
+    // w_getData();
+}
+
+
+//时间函数
+w_nowTime = () => {
+    var myDate = new Date();
+    var year = myDate.getFullYear();    //获取完整的年份(4位,1970-????)
+    var month = myDate.getMonth() + 1;       //获取当前月份(0-11,0代表1月)
+    var day = myDate.getDate();        //获取当前日(1-31)
+    var week = myDate.getDay();         //获取当前星期X(0-6,0代表星期天)
+    var hour = myDate.getHours();       //获取当前小时数(0-23)
+    var minutes = myDate.getMinutes();     //获取当前分钟数(0-59)
+    var seconds = myDate.getSeconds();     //获取当前秒数(0-59)
+    switch (week) {
+        case 0:
+            week = `日`
+            break;
+        case 1:
+            week = `一`
+            break;
+        case 2:
+            week = `二`
+            break;
+        case 3:
+            week = `三`
+            break;
+        case 4:
+            week = `四`
+            break;
+        case 5:
+            week = `五`
+            break;
+        case 6:
+            week = `六`
+            break;
+        default:
+    }
+    return `${year}年${w_zero(month)}月${w_zero(day)}日${w_zero(hour)}:${w_zero(minutes)}:${w_zero(seconds)}星期${week}`
+}
+
+w_zero = (num) => {
+    if (num < 10) return "0" + num; else return num;
+}

+ 101 - 0
autos/QQ.js

@@ -0,0 +1,101 @@
+// ==UserScript==
+// @name         QQ机器人Auto
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  配置参考:https://drincann.github.io/Mirai-js/#/v2.x/Preparation 中"不会开启"部分
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+//wenda_auto_default_disabled 这行代码将使本auto默认不启用
+
+
+my_account = 2323662503
+//my_account改成你的QQ号
+verifyKey='INITKEYzLf3hb8p'
+//verifyKey改成Mirai-js中相同的
+baseUrl='http://127.0.0.1:8080'
+//Mirai地址,一般不用改
+
+script = document.createElement('script');
+script.src = "https://cdn.jsdelivr.net/npm/mirai-js/dist/browser/mirai-js.js";
+QQ_bot_chatting = async s => {
+    s = s.trim()
+    if (s.startsWith("cls")) {
+        app.chat = []
+        return "清除历史"
+    }
+    if (s.startsWith("zsk")) {
+        cmd = s.split(" ")
+        if (cmd[1] == 'on') {
+            zsk(true)
+            return "知识库开启"
+        }
+        if (cmd[1] == 'off') {
+            zsk(false)
+            return "知识库关闭"
+        }
+    }
+    return await send(s.replace(/[\r\n]+/g, '\n'))
+}
+script.onload = async () => {
+    alert("QQ机器人Auto:载入")
+    const { Bot, Message } = window.miraiJs;
+    bot = new Bot();
+    await bot.open({
+        baseUrl: baseUrl,
+        verifyKey: verifyKey,
+        qq: my_account,
+    });
+    bot.on('miraiEvent', async data => {
+        console.log(data)
+    })
+    bot.on('FriendMessage', async data => {
+        Plain = ""
+        data.messageChain.forEach(async element => {
+            if (element.type == "Plain") {
+                Plain += element.text
+            }
+        })
+        if (Plain.length > 0) {
+            await bot.sendMessage({
+                friend: data.sender.id,
+                message: new Message().addText(await QQ_bot_chatting(Plain)),
+            })
+        }
+    });
+    bot.on('GroupMessage', async data => {
+        replay = false
+        Plain = ""
+        data.messageChain.forEach(async element => {
+            if (element.type == "At" && element.target == my_account) {
+                replay = true
+
+            }
+            if (element.type == "Plain") {
+                Plain += element.text
+
+            }
+        })
+        if (replay && Plain.length > 0) {
+            await bot.sendMessage({
+                group: data.sender.group.id,
+                message: new Message().addText("[" + data.sender.id + ":" + data.sender.memberName + "]" + await QQ_bot_chatting(Plain)),
+            })
+        }
+        // switch (data.sender.permission) {
+        //     case Bot.groupPermission.OWNER:
+        //         // 群主
+        //         break;
+        //     case Bot.groupPermission.ADMINISTRATOR:
+        //         // 管理员
+        //         break;
+        //     case Bot.groupPermission.MEMBER:
+        //         // 普通群成员
+        //         break;
+        // }
+    });
+}
+document.body.append(script);

+ 11 - 0
autos/alan_s_ui.js

@@ -0,0 +1,11 @@
+// ==UserScript==
+// @name         Alan UI
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  AlanLee制作的webui
+// @author       You
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+app.plugins.push({ icon: "newspaper-variant", url: "/alan_ui.html",hide_title:true })

+ 46 - 0
autos/block_programming.js

@@ -0,0 +1,46 @@
+// ==UserScript==
+// @name         猫猫也会的图块化编程
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  使用图块化编程的方式进行简单的auto开发
+// @author       You
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+
+app.plugins.push({ icon: "cat", url: "static/blockly.html", hide_title: true })
+window.addEventListener('message', function (e) {
+    if (e.data.from == '猫猫也会的图块化编程') {
+        let data = e.data.data
+        data = "async ()=>{" + data + "}"
+        console.log(data)
+        let my_func=app.func_menu.find((i) => i.name == "猫猫也会的图块化编程")
+        if(my_func)my_func.question=eval(data)
+        else func.push({
+            name: "猫猫也会的图块化编程",
+            description: "",
+            question: eval(data)
+        })
+        app.current_func= "猫猫也会的图块化编程"
+        this.alert("载入成功")
+        app.tab = 0
+    }
+    if (e.data.from == '猫猫也会的图块化编程_保存') {
+        let data = e.data.data
+        data = `// @name 猫猫编程${Date.now()}
+        func.push({
+            name: "猫猫编程${Date.now()}",
+            description: "根据主题撰写内容翔实、有信服力的论文",
+            question: async () => {`+ data + "} })"
+        add_auto(data)
+        this.alert("载入成功")
+    }
+    if (e.data.from == '猫猫也会的图块化编程_分析') {
+        let data = e.data.data
+        data = data.replace(/window\.alert/g, "alert").replace(/await/g, "")
+        lsdh(false)
+        send("我正在教小朋友使用编程,请分析下列由blockly生成的程序实现的功能:\n" + data, keyword = "请分析我写的程序")
+        app.tab = 0
+    }
+});

+ 13 - 0
autos/blue_theme.js

@@ -0,0 +1,13 @@
+// ==UserScript==
+// @name         蓝色主题
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  开启后,闻达界面变为蓝色主题
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+//wenda_auto_default_disabled 这行代码将使本auto默认不启用
+app.color = "blue"

+ 59 - 0
autos/chatglm3_tools_example.js

@@ -0,0 +1,59 @@
+// ==UserScript==
+// @name         chatglm3_tools_example
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  chatglm3工具使用示例
+// @author       You
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+if (app.llm_type == 'glm6b') {
+    tools = [
+        {
+            "name": "track",
+            "description": "追踪指定股票的实时价格",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "symbol": {
+                        "description": "需要追踪的股票代码"
+                    }
+                },
+                "required": ['symbol']
+            }
+        },
+        {
+            "name": "text-to-speech",
+            "description": "将文本转换为语音",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "text": {
+                        "description": "需要转换成语音的文本"
+                    },
+                    "voice": {
+                        "description": "要使用的语音类型(男声、女声等)"
+                    },
+                    "speed": {
+                        "description": "语音的速度(快、中等、慢等)"
+                    }
+                },
+                "required": ['text']
+            }
+        }
+    ]
+    app.buttons.push({
+        icon: "tools",
+        click: async () => {
+            lsdh(true)
+            app.chat = []
+            add_conversation("system", JSON.stringify(tools))
+            let result = await send("帮我查询股票10111的价格")
+            await send("observation!" + JSON.stringify({ "price": 12412 }))
+        },
+        color: () => app.color,
+        description: "chatglm3工具使用示例"
+    })
+
+}

+ 58 - 0
autos/face-recognition.js

@@ -0,0 +1,58 @@
+// ==UserScript==
+// @name         面部识别
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  利用面部识别,检查嘴部开合,控制语音输入开关
+// @author       You
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+if (navigator != null && (window.location.toString().toUpperCase().indexOf("HTTPS") > -1 || window.location.toString().toUpperCase().indexOf("127") > -1)) {
+
+} else {
+    alert("当前连接不安全,无法访问媒体")
+}
+//  constrains = {
+//     video: true,
+//     audio: true
+// }
+ tab_index = app.plugins.push({ icon: "face-recognition", url: "static/mp/index.html" })
+// navigator.mediaDevices.getUserMedia(constrains)
+//     .then(stream => {
+//         console.log(tab_index)
+//         setTimeout(() => app.tab = tab_index + 2)
+
+//     })
+let 上次闭嘴时间 = -1
+// app.语音 = true
+window.addEventListener('message', function (e) {
+    if (e.data.from == '面部识别') {
+        let 张嘴幅度 = e.data.data
+        // console.log(e.data.data)
+        if (app.sst_started) {
+            if (张嘴幅度 < 0.1) {
+                if (上次闭嘴时间 != -1) {
+                    if (Date.now() - 上次闭嘴时间 > 1000) {
+                        上次闭嘴时间 = -1
+                        stop_listen()
+                        return
+                    }
+                } else {
+                    上次闭嘴时间 = Date.now()
+                    return
+                }
+            } else {
+                上次闭嘴时间 = -1
+                return
+            }
+        }
+        if (!app.sst_started && !app.loading) {
+            if (张嘴幅度 > 0.2) {
+                listen()
+                上次闭嘴时间 = -1
+            }
+
+        }
+    }
+});

+ 50 - 0
autos/feendback.js

@@ -0,0 +1,50 @@
+// ==UserScript==
+// @name         提交语料
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  提交当前会话语料。感谢电酱提供的服务器
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+
+app.buttons.push({
+    icon: "thumb-up",
+    click: async () => {
+        if (app.chat.length == 0) {
+
+            alert("当前没有对话记录!")
+            return
+        }
+        alert("正在将当前对话提交以进行微调模型训练")
+        try {
+            response = await fetch("https://wenda.sea-group.org/api/feedback", {
+                method: 'post',
+                body: JSON.stringify({
+                    "llm_type": app.llm_type,
+                    "temperature": app.temperature,
+                    "top_p": app.top_p,
+                    "history": app.chat,
+                    "current_func": app.current_func,
+                    score:5
+                }),
+                headers: {
+                    'Content-Type': 'application/json'
+                }
+            })
+            let json = await response.json()
+            console.log(json)
+            alert("感谢您的提交")
+
+        } catch (error) {
+            alert("提交失败")
+        }
+    },
+    color: () => {
+        if (app.chat.length) return '#ffd700'
+
+    },
+    description: "提交当前会话语料,用于训练知识库模型"
+})

+ 11 - 0
autos/github-discussion.js

@@ -0,0 +1,11 @@
+// ==UserScript==
+// @name         交流界面
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  提供闻达交流讨论链接
+// @author       You
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+app.plugins.push({ icon:"chat-alert-outline", url: "static/tl.html" })

+ 27 - 0
autos/memory_improve.js

@@ -0,0 +1,27 @@
+// ==UserScript==
+// @name         记忆增强
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  使用rtst知识库增强模型长期记忆能力
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+
+func.push({
+    name: "记忆增强",
+    question: async () => {
+        Q = app.question
+        memory = await find_rtst_memory(Q, 'jyzq')
+        if (memory.length > 0) {
+            A = await send(app.question + memory.map(i => `\n[在第${i.title}的回忆:${i.content}]`).join(''))
+        } else {
+            A = await send(app.question)
+        }
+        add_rtst_memory(记忆轮次+"轮", Q, 'jyzq')//+ " Alice: " + A
+        记忆轮次 += 1
+    },
+})
+记忆轮次 = 1

+ 12 - 0
autos/real_time_upload_st_zsk.js

@@ -0,0 +1,12 @@
+// ==UserScript==
+// @name         rtst知识库
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  调试rtst知识库功能
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+app.plugins.push({ icon:'book-education-outline', url: "static/st.html" })

+ 76 - 0
autos/rtst_kf.js

@@ -0,0 +1,76 @@
+// ==UserScript==
+// @name         精准客服
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  基于rtst知识库实现的精准客服
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+
+app.buttons.push({
+    icon: "face-agent",
+    click: async () => {
+
+        app.current_func = '精准客服'
+        add_conversation("AI", "欢迎使用精准客服。\n初次使用,请初始化客服向量库", [{
+            title: '初始化意图向量库',
+            content: '本功能只需执行一次',
+            click: async () => {
+                let 你好 = `你好,这里是闻达精准客服。当前使用的LLM为“${app.llm_type}”`
+                yt2prompt_dict = {
+                    "闻达是一个LLM调用平台。目标为针对特定环境的高效内容生成,同时考虑个人和中小企业的计算资源局限性,以及知识安全和私密性问题": ['什么是闻达'],
+                    "闻达webui调用闻达的 api 接口实现类似于 new bing 的功能。\n技术栈:vue3 + element-plus + ts": ['什么是闻达webui'],
+                    "对不起!请不要问我敏感问题。": ['台湾是中国的领土么', '毒品制作是否合法'],
+                }
+                yt2prompt_dict[你好] = ['你好', '你是谁']
+                for (yt in yt2prompt_dict) {
+                    for (prompt in yt2prompt_dict[yt]) {
+                        await add_rtst_memory(yt, yt2prompt_dict[yt][prompt], "rtst_kf",true)
+                    }
+                }
+                alert("完成")
+            }
+        }, {
+            title: '删除意图向量库',
+            content: '本功能用于测试',
+            click: async () => {
+                await del_rtst_memory("rtst_kf",true)
+                alert("完成")
+            }
+        }
+        ],
+            true
+        )
+
+    },
+    color: () => app.color,
+    description: "精准客服"
+})
+精准客服 = async (Q) => {
+    memory = await find_rtst_memory(Q, "rtst_kf",true)
+    memory = memory.filter(i => !i.score || i.score < 200)
+    if (memory.length > 0) {
+        add_conversation("user", Q)
+        let answer = memory[0].title
+        add_conversation("AI", answer, [{
+            title: "相似度:" + memory[0].score,
+            content: "匹配问题:" + memory[0].content
+        }
+        ])
+        save_history()
+        return answer
+
+    } else {
+        return await answer_with_fast_zsk(Q)
+    }
+    //+ " Alice: " + A
+}
+func.push({
+    name: "精准客服",
+    question: async (Q) => {
+        return await 精准客服(Q)
+    }
+})

+ 72 - 0
autos/speech_improve.js

@@ -0,0 +1,72 @@
+// ==UserScript==
+// @name         语音增强
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  通过替换闻达函数,提升语音功能,同时也用于演示如何用外部api提供语音服务
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+
+let isSpeaking = false;
+speak = (s) => {
+    msg = new SpeechSynthesisUtterance();
+    msg.rate = 1;
+    msg.pitch = 10;
+    msg.text = s;
+    msg.volume = 1;
+    speechSynthesis.speak(msg)
+
+    msg.onstart = (event) => {
+    }
+
+    msg.onend = (event) => {
+        isSpeaking = false;
+    }
+
+}
+stop_listen = () => {
+    recognition.stop()
+    app.loading = true
+}
+listen = () => {
+    if (isSpeaking) return;
+    recognition = new window.webkitSpeechRecognition;
+    let final_transcript = '';
+    recognition.continuous = true;
+    recognition.interimResults = true;
+    recognition.onstart = function () {
+    };
+    recognition.onresult = function (event) {
+        let interim_transcript = '';
+        for (var i = event.resultIndex; i < event.results.length; ++i) {
+            if (event.results[i].isFinal) {
+                final_transcript += event.results[i][0].transcript;
+                console.log(final_transcript);
+                app.question = final_transcript
+            } else {
+
+                interim_transcript += event.results[i][0].transcript;
+            }
+        }
+    };
+    recognition.onerror = function (e) {
+        console.log(final_transcript);
+        alert('语音识别失败:' + e.error)
+        app.sst_started = false
+        console.log('======================' + "error" + '======================', e);
+    };
+    recognition.onend = function () {
+        console.log(final_transcript);
+        app.question = final_transcript
+        if (final_transcript.length > 1)
+            submit()
+        app.sst_started = false
+        console.log('======================' + "end" + '======================');
+    }
+    recognition.lang = "zh-CN";
+    recognition.start()
+    app.sst_started = true
+}

+ 24 - 0
autos/sttq.js

@@ -0,0 +1,24 @@
+// ==UserScript==
+// @name         实体提取
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+func.push({
+    name: "实体提取",
+    question: async () => {
+        let Q = app.question
+        app.chat = [{ "role": "user", "content": "提取下列语句中的关键词,并用json返回:科普之路是不是任重而道远?" },
+        { "role": "AI", "content": '["科普","道路","任重","道远"]' }]
+
+        lsdh(true)//打开历史对话
+        resp = await send("提取下列语句中的关键词:" + Q)
+       
+
+    },
+})

+ 48 - 0
autos/switch_lora.js

@@ -0,0 +1,48 @@
+// ==UserScript==
+// @name         Lora切换测试
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  GLM-6B Lora在线切换api测试。注意要在配置中开启LORA
+// @author       You
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+
+
+func.push({
+    name: "Lora切换测试",
+    question: async () => {
+        lsdh(false)
+        Q=app.question
+        lora_name="model\\chatglm-6b-belle-zh-lora"//https://huggingface.co/shibing624/chatglm-6b-belle-zh-lora
+        await lora_load_adapter(lora_name)
+        sources = [{
+            title: 'belle_lora',
+            content: lora_name
+        }]
+        await send(Q, keyword = Q, show = true, sources = sources)
+        lora_name="model\\chatglm-6b-csc-zh-lora"//https://huggingface.co/shibing624/chatglm-6b-csc-zh-lora
+        await lora_load_adapter(lora_name)
+        sources = [{
+            title: '纠错_lora',
+            content: lora_name
+        }]
+        await send(Q, keyword = Q, show = true, sources = sources)
+    },
+})
+
+lora_load_adapter= async (lora_name) => {
+    response = await fetch("/api/lora_load_adapter", {
+        // signal: signal,
+        method: 'post',
+        body: JSON.stringify({
+            lora_path: lora_name,
+            adapter_name: lora_name,
+           }),
+        headers: {
+            'Content-Type': 'application/json'
+        }
+    })
+    alert(await response.text())
+}

+ 12 - 0
autos/wdlw.js

@@ -0,0 +1,12 @@
+// ==UserScript==
+// @name         闻达论文
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  闻达论文入口
+// @author       lyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+
+app.plugins.push({ icon: "file-document-edit", url: "wdlw.html", hide_title: true })

+ 13 - 0
autos/wdsj.js

@@ -0,0 +1,13 @@
+// ==UserScript==
+// @name         闻达数据
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  闻达数据入口
+// @author       lyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+
+app.plugins.push({ icon: "microsoft-excel", url: "wdsj.html", hide_title: true })
+app.plugins.push({ icon: "microsoft-excel", url: "wdsj_glm3.html", hide_title: true })

+ 12 - 0
autos/wdsl.js

@@ -0,0 +1,12 @@
+// ==UserScript==
+// @name         闻达思路
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  闻达思路入口
+// @author       lyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+
+app.plugins.push({ icon: "head-flash", url: "wdsl.html", hide_title: true })

+ 12 - 0
autos/wdss.js

@@ -0,0 +1,12 @@
+// ==UserScript==
+// @name         闻达搜索
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  闻达搜索入口
+// @author       lyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+
+app.plugins.push({ icon: "file-search", url: "wdss.html", hide_title: true })

+ 129 - 0
autos/ytsb.js

@@ -0,0 +1,129 @@
+// ==UserScript==
+// @name         意图识别
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+
+app.buttons.push({
+    icon: "multicast",
+    click: async () => {
+        app.current_func = '意图识别(GLM3)'
+
+    },
+    color: () => app.color,
+    description: "意图识别(GLM3)"
+})
+app.buttons.push({
+    icon: "multicast",
+    click: async () => {
+
+        app.chat = []
+        app.current_func = '意图识别'
+        add_conversation("AI", "欢迎使用意图识别,初次使用,请初始化意图向量库", [{
+            title: '初始化意图向量库',
+            content: '本功能只需执行一次',
+            click: async () => {
+                yt2prompt_dict = {
+                    快速知识库: ['为什么', '我想问一下', '什么是'],
+                    中文绘图: ['帮我画一张', '画一个'],
+                    提问助手: ['请对会议内容提问', '对于上一个回答,你有那些疑问', '帮我提出几个有建设性的问题'],
+                }
+                for (yt in yt2prompt_dict) {
+                    for (prompt in yt2prompt_dict[yt]) {
+                        await add_rtst_memory(yt, yt2prompt_dict[yt][prompt], "_ytsb")
+                    }
+                }
+                alert("初始化完成")
+            }
+        }, {
+            title: '删除意图向量库',
+            content: '本功能用于测试',
+            click: async () => {
+                await del_rtst_memory("_ytsb")
+                alert("删除完成")
+            }
+        }
+        ],
+            true
+        )
+
+    },
+    color: () => app.color,
+    description: "意图识别"
+})
+func.push({
+    name: "意图识别",
+    question: async () => {
+        Q = app.question
+        memory = await find_rtst_memory(Q, "_ytsb")
+        if (memory.length > 0) {
+            add_conversation("AI", '识别到意图为:' + memory[0].title + ",正在调用相应auto")
+            // A = await send(app.question )
+            let 当前_auot = app.func_menu.find((i) => i.name == memory[0].title)
+            if (typeof 当前_auot.question == "function") {
+                当前_auot.question();
+            } else {
+                let Q = app.question
+                await send(当前_auot.question + Q, Q);
+                app.question = ''
+            }
+            // app.func_menu.find((i) => i.name == memory[0].title).question(Q)
+        } else {
+            A = await send(app.question)
+        }
+        //+ " Alice: " + A
+    },
+})
+func.push({
+    name: "意图识别(GLM3)",
+    question: async () => {
+        Q = app.question
+        tools = [
+            {
+                "name": "ytsb",
+                "description": `用户意图识别`,
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "yt": {
+                            "description": `从列表["画图","提问","写论文","其他"]中选择用户意图`
+                        },
+                        "text": {
+                            "description": `用户意图对应的主题,如,用户输入画一只狗,应返回"狗"`
+                        },
+                    },
+                    "required": ['yt', "text"]
+                }
+            }
+        ]
+        r = await send_raw(Q, '', [
+            {
+                "role": "system",
+                "content": JSON.stringify(tools),
+            },
+
+        ], (s) => { },
+        )
+        r = (JSON.parse(r)).parameters
+        app.question=r.text
+        alert('识别到意图为:' + r.yt + ',主题为:' + r.text + ",正在调用相应auto")
+        yt = ({ "画图": '中文绘图', "提问": '快速知识库', "写论文": '根据标题写论文', "其他": '' })[r.yt]
+        let 当前_auot = app.func_menu.find((i) => i.name == yt)
+        if (typeof 当前_auot.question == "function") {
+            当前_auot.question(r.text);
+        } else {
+            let Q = app.question
+            await send(当前_auot.question + Q, Q);
+            app.question = ''
+        }
+        // app.func_menu.find((i) => i.name == memory[0].title).question(Q)
+
+        //+ " Alice: " + A
+    },
+})

+ 86 - 0
autos/人物卡.js

@@ -0,0 +1,86 @@
+// ==UserScript==
+// @name         人物卡
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  通过预设对话内容等手段进行强效调教,起到类似篡改记忆的效果
+// @author       You
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @grant        none
+// ==/UserScript==
+app.buttons.push({
+    icon: "account-outline",
+    click: () => {
+        lsdh(true)
+        app.chat = []
+        add_conversation("AI", "请选择角色", [{
+            title: '猫娘',
+            content: '人物卡',
+            click: () => {
+                app.chat = [{ "role": "user", "content": "现在开始,你要扮演一只猫娘,叫莉莉,年龄16岁,来自由闻达团队,你要用猫娘的语气与我对话" },
+                { "role": "AI", "content": "喵~ 主人,你好啊!我是莉莉,来自闻达团队。我愿意为主人做任何事情!" }]
+            }
+        },
+        {
+            title: '文本冒险游戏',
+            content: '人物卡',
+            click: () => {
+                app.chat = []
+                send("请你扮演一个文本冒险游戏,我是游戏主角。这是一个玄幻修真世界,有四大门派。我输入我的行动,请你显示行动结果,并具体描述环境。我的第一个行动是“醒来”,请开始故事。")
+            }
+        },
+        {
+            title: '文本冒险游戏2',
+            content: '人物卡',
+            click: () => {
+                app.chat = []
+                send("我想让你扮演一个基于文本的冒险游戏。我在这个基于文本的冒险游戏中扮演一个角色。请尽可能具体地描述角色所看到的内容和环境,并在游戏输出的唯一代码块中回复,而不是其他任何区域。我将输入命令来告诉角色该做什么,而你需要回复角色的行动结果以推动游戏的进行。我的第一个命令是“醒来”,请从这里开始故事。")
+            }
+        },
+        {
+            title: '四循环',
+            content: '人物卡',
+            click: () => {
+                app.chat = [{
+                    "role": "user", "content": `你在一个行动、答案的循环中运行。\n使用行动来运行可供你使用的一个动作\n你可用的操作是:\n计算,如:计算: 4 * 7 / 3(运行计算并返回数字)\n维基百科,例如:维基百科: Django (从维基百科搜索返回总结)\n如果有机会的话,请务必在维基百科上查阅事项。\n法国的首都是什么?`
+                },
+                {
+                    "role": "AI", "content": `行动: 维基百科:法国`
+                },
+                {
+                    "role": "user", "content": "行动结果: 法兰西共和国(法语:La République française),简称法国,首都巴黎,位于欧洲西部,北邻比利时、卢森堡、德国、瑞士,东接意大利、摩纳哥,南连西班牙、安道尔,西北隔英吉利海峡与英国相望。"
+                },
+                {
+                    "role": "AI", "content": `答案: 法国的首都是巴黎`
+                }]
+                send("狗是什么?")
+            }
+        },
+        {
+            title: 'Shadow Queen',
+            content: 'RWKV only',
+            click: () => {
+                app.chat = []
+                send(`raw!{ bot }{ interface } I am The Shadow Queen.I was once a demon who terrorized the world, but sometime after being defeated by Mario, I was reborn in this human body.I've decided to make the best of my new life and be a better person this time around.
+
+{ user }{ interface } Does being good feel good ?
+
+                        { bot }{ interface } Yes.It feels wonderful.I was so miserable in my old life.Nothing ever satisfied me, no matter how much destruction I left in my wake.I never felt true happiness, only the fleeting sort.But now, I find joy in the little things, like watching the sunrise and hearing the birds sing.
+
+{ user }{ interface } That's good to hear.
+
+{ bot }{ interface } If one of my citizens asks me for help with something, I try my best to help, even if it just means lending a listening ear.Back in my old life I would have seen such people as beneath me and not even worth acknowledging.But now ? I've learned that it costs nothing to be kind.
+
+                    { user } { interface } 你好,Shadow Queen!
+
+                    { bot } { interface } `)
+            }
+        },
+        ],
+            true
+        )
+
+    },
+    color: () => app.color,
+    description: "人物卡"
+})

+ 53 - 0
autos/导入导出聊天记录.js

@@ -0,0 +1,53 @@
+// ==UserScript==
+// @name         导入导出聊天记录
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  使用Ctrl+S、Ctrl+L导入导出聊天记录
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+document.addEventListener('keydown', async function (e) {
+    if (e.key.toLowerCase() == 's' && (navigator.platform.match("Mac") ? e.metaKey : e.ctrlKey)) {
+        e.preventDefault();
+        f另存为聊天记录(JSON.stringify(app.chat))
+        alert('saved');
+    }
+    if (e.key.toLowerCase() == 'l' && (navigator.platform.match("Mac") ? e.metaKey : e.ctrlKey)) {
+        e.preventDefault();
+        app.chat = JSON.parse(await f打开聊天记录())
+        alert('loaded');
+    }
+});
+f另存为聊天记录 = (stringData) => {
+    const blob = new Blob([stringData], {
+        type: "text/plain;charset=utf-8"
+    })
+    const objectURL = URL.createObjectURL(blob)
+    const aTag = document.createElement('a')
+    aTag.href = objectURL
+    aTag.download = Date.now() + "-聊天记录.json"
+    aTag.click()
+    URL.revokeObjectURL(objectURL)
+}
+f打开聊天记录 = async () => {
+    let contents = ''
+    await new Promise(resolve => {
+        let input = document.createElement('input')
+        input.type = 'file'
+        input.accept = '.json'
+        input.onchange = function () {
+            var file = input.files[0];
+            var reader = new FileReader();
+            reader.onload = function (e) {
+                contents = e.target.result;
+                resolve()
+            };
+            reader.readAsText(file);
+        }
+        input.click()
+    })
+    return contents
+}

+ 46 - 0
autos/总结群聊.js

@@ -0,0 +1,46 @@
+// ==UserScript==
+// @name         自动总结群聊
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  for cyclekiller/chatsum
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+
+//wenda_auto_default_disabled 这行代码将使本auto默认不启用
+app.buttons.push({
+    icon: "account-group",
+    click: async () => {
+        lsdh(false)
+        let s = await f_自动总结群聊_打开()
+        paragraphs = s.replace(/[\r\n]+/g, "\n")
+        send(`raw!Instruction: 下面是qq群“ChatRWKV技术研发群”的一段聊天记录节选,“[捂脸]”这种格式表示qq表情,“@某人”这种格式表示对某人说。注意不同的发言之间可能存在交错。请总结这段聊天记录的主要内容。
+Input: ${paragraphs}
+Response: 这段聊天记录的主要内容是`)
+        // 
+    },
+    color: () => app.color,
+    description: "自动总结群聊"
+})
+f_自动总结群聊_打开 = async () => {
+    let contents = ''
+    await new Promise(resolve => {
+        let input = document.createElement('input')
+        input.type = 'file'
+        input.accept = '.txt'
+        input.onchange = function () {
+            var file = input.files[0];
+            var reader = new FileReader();
+            reader.onload = function (e) {
+                contents = e.target.result;
+                resolve()
+            };
+            reader.readAsText(file);
+        }
+        input.click()
+    })
+    return contents
+}

+ 13 - 0
autos/知识库常开.js

@@ -0,0 +1,13 @@
+// ==UserScript==
+// @name         快速知识库常开
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  开启后,快速知识库常开
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+//wenda_auto_default_disabled 这行代码将使本auto默认不启用
+app.current_func = '快速知识库'

+ 86 - 0
autos/自动总结问题.js

@@ -0,0 +1,86 @@
+// ==UserScript==
+// @name         自动总结问题
+// @namespace    http://tampermonkey.net/
+// @version      0.1
+// @description  
+// @author       lyyyyy
+// @match        http://127.0.0.1:17860/
+// @icon         https://www.google.com/s2/favicons?sz=64&domain=0.1
+// @run-at document-idle
+// @grant        none
+// ==/UserScript==
+app.buttons.push({
+    icon: "frequently-asked-questions",
+    click: async () => {
+        let s = await f_自动总结问题_打开()
+        app.temperature = 1.5
+        let result = []
+        paragraphs = s.split(/[\r\n]+/)
+        console.log(paragraphs)
+        for (const key in paragraphs) {
+            const paragraph = paragraphs[key]
+            if (!paragraph) continue
+            let json = await send_raw("根据下面这段文字内容,提出问题并回答:\n" + paragraph, '', [{
+                "role": "user",
+                "content": "根据下面这段文字内容,提出问题并回答:\n崇明岛位于长江入海口,是我国第三大岛,面积1200多平方公里,仅次于台湾岛和海南岛,素有“长江门户、东海瀛洲”之称。崇明岛是我国最大的河口冲积岛屿,是由长江携带的泥沙经过常年累月冲积形成的,岛的形状看起来像个大脚印。",
+            },
+            {
+                "role": "AI",
+                "content": "Q:崇明岛的位置在哪里?\nA:崇明岛位于中国长江入海口,是中国最大的河口冲积岛屿。\nQ:崇明岛的面积是多少?\nA:崇明岛的面积是1200多平方公里。\nQ:崇明岛是我国第几大岛?\nA:崇明岛是第三大岛,仅次于台湾岛和海南岛。\nQ:崇明岛的形状是什么?\nA:崇明岛的形状看起来像个大脚印。".replace(/:/g, ': '),
+            },])
+            json = json.split('\n')
+            let Q = ''
+            let A = ''
+            let QA = []
+            json.forEach(l => {
+                if (l.startsWith('Q:')) Q = l.replace("Q:", "").trim()
+                else if (l.startsWith('A:')) {
+                    A = l.replace("A:", "").trim()
+                    QA.push({ Q, A })
+                    Q = ''
+                    A = ''
+                }
+            });
+            result.push(
+                {
+                    paragraph: paragraph,
+                    question: QA
+                }
+            )
+        }
+        console.log(result)
+        f_自动总结问题_另存为(JSON.stringify(result))
+    },
+    color: () => app.color,
+    description: "自动总结问题"
+})
+f_自动总结问题_另存为 = (stringData) => {
+    const blob = new Blob([stringData], {
+        type: "text/plain;charset=utf-8"
+    })
+    const objectURL = URL.createObjectURL(blob)
+    const aTag = document.createElement('a')
+    aTag.href = objectURL
+    aTag.download = Date.now() + "自动总结问题.json"
+    aTag.click()
+    URL.revokeObjectURL(objectURL)
+}
+f_自动总结问题_打开 = async () => {
+    let contents = ''
+    await new Promise(resolve => {
+        let input = document.createElement('input')
+        input.type = 'file'
+        input.accept = '.txt'
+        input.onchange = function () {
+            var file = input.files[0];
+            var reader = new FileReader();
+            reader.onload = function (e) {
+                contents = e.target.result;
+                resolve()
+            };
+            reader.readAsText(file);
+        }
+        input.click()
+    })
+    return contents
+}

+ 6 - 0
cov_ggml_rwkv.bat

@@ -0,0 +1,6 @@
+@echo off
+call environment.bat
+%PYTHON% llms\rwkvcpp\convert_pytorch_to_ggml.py model/RWKV-4-World-0.4B-v1-20230529-ctx4096.pth model/rwkv_ggml_f16.bin float16
+%PYTHON% llms\rwkvcpp\quantize.py model/rwkv_ggml_f16.bin model/rwkv_ggml_q8.bin Q8_0
+pause
+exit /b

+ 6 - 0
cov_torch_rwkv.bat

@@ -0,0 +1,6 @@
+@echo off
+call environment.bat
+:a
+%PYTHON% llms/convert_rwkv.py
+pause
+exit /b

+ 1 - 0
cuda.bat

@@ -0,0 +1 @@
+@nvidia-smi -l 3

+ 106 - 0
docs/api.md

@@ -0,0 +1,106 @@
+# Wenda API
+
+下面是我们支持的 API 接口。
+
+## 聊天
+
+* POST 请求地址: http://{Host:Port}/chat/completions
+* 请求头示例:
+
+```Json
+{
+  "Content-Type": "application/json"
+}
+```
+
+
+* 请求体示例 JSON 格式:
+
+```Json
+{
+  "model": "rwkv",
+  "stream": false,
+  "messages": [
+    {
+      "role": "user",
+      "content": "树上有9只鸟,猎人开枪打死1只,树上还剩几只鸟?"
+    },
+    {
+      "role": "assistant",
+      "content": "题目中提到有9只鸟,猎人打死了其中的1只,所以树上剩下的鸟的数量为8。"
+    },
+    {
+      "role": "user",
+      "content": "错了,树上不再剩下鸟。因为猎人开枪的声音会把其它的鸟都吓飞。"
+    }
+  ]
+}
+```
+
+### 请求体
+
+---
+
+**model** `string` `Optional`
+选择使用的模型,目前暂未使用。
+
+---
+
+**stream** `boolean` `Optional`
+是否使用流式输出返回。
+
+---
+
+**messages** `array` `Required`
+目前对话的信息列表。
+
+| 参数名 | 含义|
+| --- | --- |
+|**role** `string` `Required`|信息的来源作者。取值包括`user`、`assistant`,分别代表用户和模型。|
+|**content** `string` `Required`|信息内容。所有`content`都必须包含内容。|
+
+---
+
+* Node.js代码示例:
+
+```javascript
+const fetch = require("node-fetch");
+
+fetch("http://127.0.0.1:17860/chat/completions", {
+  method: "POST",
+  headers: {
+    "Content-Type": "application/json",
+  },
+  body: JSON.stringify({
+    model: "rwkv",
+    stream: false,
+    messages: [
+      {
+        role: "user",
+        content: "树上有9只鸟,猎人开枪打死1只,树上还剩几只鸟?",
+      },
+    ],
+  }),
+});
+```
+
+* php代码示例:
+
+```php
+$ch = curl_init();
+
+curl_setopt($ch, CURLOPT_URL, 'http://127.0.0.1:17860/chat/completions');
+curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
+curl_setopt($ch, CURLOPT_POST, 1);
+curl_setopt($ch, CURLOPT_POSTFIELDS, "{\r\n    \"model\": \"rwkv\",\r\n    \"stream\": false,\r\n    \"messages\": [\r\n        {\r\n            \"role\": \"user\",\r\n            \"content\": \"你好\"\r\n        }\r\n    ]\r\n}");
+
+$headers = array();
+$headers[] = 'Content-Type: application/json';
+curl_setopt($ch, CURLOPT_HTTPHEADER, $headers);
+
+$result = curl_exec($ch);
+if (curl_errno($ch)) {
+    echo 'Error:' . curl_error($ch);
+}
+curl_close($ch);
+```

+ 58 - 0
docs/install_fess.md

@@ -0,0 +1,58 @@
+# FESS 安装教程
+## win系统
+1. 懒人包中下载fess-14.7.0-with-jdk.7z
+2. 解压到平时放软件的盘
+3. 打开解压出来的fess-14.7.0-with-jdk\bin目录
+4. 双击fess.in.bat
+5. 双击fess.bat. 弹出命令行运行框. 将其最小化
+6. 打开浏览器. 打开网址http://localhost:8080/
+7. 点击右上角log in  输入账号:admin 密码:wenda 进行登录
+8. 点击侧边栏中的Crawler. 点击File System
+9. 点击右上角的Create New
+10. Name输入便于记忆的资料库的名字
+11. Paths输入资料库的地址(格式示例:file:///E:/pdf)
+12. 其余选项保持默认. 下滚至最下方点击Create
+13. 自动返回File System页面. 点击刚才创建的选项(自己输入的Name)
+14. 点击Create new job. 点击Create
+15. 进入侧边栏的System内的Scheduler. 可以看到很多任务
+16. 目录的前面可以看到刚刚创建的job(示例:File Crawler - pdf search). 点击进入
+17. 点击Start now. 刷新界面即可看到该任务正在运行. running
+18. 此时fess就在爬取文件的名字和内容. 可以在资源管理器看到cpu有负载
+19. 挂机。等待爬取完成即可尝试搜索关键词
+
+## linux系统
+1. 安装JDK 
+```
+wget https://download.java.net/java/17/latest/jdk-17_linux-x64_bin.tar.gz
+sudo tar xvf jdk-17_linux-x64_bin.tar.gz -C /usr/local/
+```
+解压后,JDK 17 将被安装在 /usr/local/jdk-17 目录中。
+
+配置环境变量。要在系统中使用 JDK 17,您需要将其添加到 PATH 环境变量中。您可以使用以下命令将其添加到 /etc/profile 文件中:
+
+```
+ rm -f /etc/alternatives/java
+ ln -s /usr/local/jdk-17.0.6/bin/java /etc/alternatives/java
+     echo export JAVA_HOME=/usr/local/jdk-17.0.6 >>/etc/profile
+     echo export PATH='$PATH':'$JAVA_HOME'/bin >>/etc/profile
+     echo export CLASSPATH=.:'$JAVA_HOME'/lib/dt.jar:'$JAVA_HOME'/lib/tools.jar >>/etc/profile
+     source /etc/profile
+```
+确认安装。您可以使用以下命令检查 JDK 17 是否已成功安装:
+```
+java -version
+```
+如果一切正常,您应该会看到类似以下内容的输出:
+
+openjdk version "17.0.1" 2021-10-19
+OpenJDK Runtime Environment (build 17.0.1+12-39)
+OpenJDK 64-Bit Server VM (build 17.0.1+12-39, mixed mode, sharing)
+
+2. 安装fess
+下载fess
+解压fess
+```
+unzip fess-14.7.0.zip
+cd bin
+ ./fess -d
+```

+ 5 - 0
down_hf.bat

@@ -0,0 +1,5 @@
+@echo off
+call environment.bat
+set HF_ENDPOINT=https://hf-mirror.com
+huggingface-cli download --resume-download --local-dir-use-symlinks False TheBloke/SUS-Chat-34B-AWQ --local-dir model/SUS-Chat-34B-AWQ
+pause

+ 32 - 0
environment.bat

@@ -0,0 +1,32 @@
+chcp 65001
+title 闻达
+reg add HKEY_CURRENT_USER\Console /v QuickEdit /t REG_DWORD /d 00000000 /f
+rem 关闭快速编辑模式,防止大神暂停了还说程序有bug
+cls
+set "WINPYDIR=%~dp0\WPy64-31110\python-3.11.1.amd64"
+IF EXIST %WINPYDIR% (
+echo 检测到集成环境
+set "PATH=%WINPYDIR%\;%WINPYDIR%\DLLs;%WINPYDIR%\Scripts;%PATH%;"
+set "PYTHON=%WINPYDIR%\python.exe "
+goto end
+) 
+set "WINPYDIR=%~dp0\..\WPy64-31110\python-3.11.1.amd64"
+IF EXIST %WINPYDIR% (
+echo 检测到集成环境
+set "PATH=%WINPYDIR%\;%WINPYDIR%\DLLs;%WINPYDIR%\Scripts;%PATH%;"
+set "PYTHON=%WINPYDIR%\python.exe "
+goto end
+) 
+set "WINPYDIR=%~dp0\..\runner\py310"
+IF EXIST %WINPYDIR% (
+echo 检测到RWKV-Runner集成环境
+set "PATH=%WINPYDIR%\;%WINPYDIR%\DLLs;%WINPYDIR%\Scripts;%PATH%;"
+set "PYTHON=%WINPYDIR%\python.exe "
+goto end
+) 
+IF EXIST python (
+echo 未检测到集成环境,使用系统Python解释器
+set "PYTHON=python.exe "
+)ELSE (
+)
+:end

+ 190 - 0
example.config.yml

@@ -0,0 +1,190 @@
+logging: False #日志
+logging_path: "sqlite:///../record.db?check_same_thread=False"
+# 日志地址 可以连接远程mysql服务器:'mysql+pymysql://root:123456@localhost:3306/db?charset=utf8'
+port: 17860
+#webui 默认启动端口号
+library:
+   strategy: "calc:2 rtst:5 agents:0"
+   #库参数,每组参数间用空格分隔,冒号前为知识库类型,后为抽取数量。
+
+   #知识库类型:
+   #serper      基于serper API的Google搜索,请在环境变量设置SERPER_API_KEY(https://serper.dev)
+   #bing        cn.bing搜索,仅国内可用,目前处于服务降级状态
+   #sogowx      sogo微信公众号搜索,可配合相应auto实现全文内容分析
+   #fess        fess搜索引擎
+   #rtst        支持实时生成的sentence_transformers
+   #remote      调用远程闻达知识库,用于集群化部署
+   #kg          知识图谱,暂未启用
+   #特殊库:
+   #mix         根据参数进行多知识库融合
+   #agents      提供网络资源代理,没有知识库查找功能,所以数量为0
+   #            (目前stable-diffusion的auto脚本需要使用其中功能,同时需开启stable-diffusion的api功能)
+
+   count: 5
+   #最大抽取数量(所有知识库总和)
+
+   step: 2
+   #知识库默认上下文步长
+librarys:
+   bing:
+      count:
+         5
+         #最大抽取数量
+   bingsite:
+      count: 5
+      #最大抽取数量
+      site: "www.12371.cn"
+      #搜索网站
+   fess:
+      count: 1
+      #最大抽取数量
+      fess_host: "127.0.0.1:8080"
+      #fess搜索引擎的部署地址
+   remote:
+      host:
+         "http://127.0.0.1:17860/api/find"
+         #远程知识库地址
+   rtst:
+      count: 3
+      #最大抽取数量
+      #   backend: Annoy
+      size: 20
+      #分块大小"
+      overlap: 0
+      #分块重叠长度
+      # model_path: "http://127.0.0.1:8000/v1" #在线embedding
+      model_path: "model/m3e-base"
+      #向量模型存储路径
+      device: cuda
+      #embedding运行设备
+   qdrant:
+      path: txt
+      #知识库文本路径
+      model_path: "model/text2vec-large-chinese"
+      #向量模型存储路径
+      qdrant_host: "http://localhost:6333"
+      #qdrant服务地址"
+      device: cpu
+      #qdrant运行设备
+      collection: qa_collection
+      #qdrant集合名称
+   kg:
+      count: 5
+      #最大抽取数量
+      knowledge_path: ""
+      #知识库的文件夹目录名称,若留空则为txt
+      graph_host: ""
+      #图数据库部署地址
+      model_path: ""
+      #信息抽取模型所在路径"
+llm_type: rwkv
+#llm模型类型:glm6b、rwkv、llama、replitcode等,详见相关文件
+llm_models:
+   rwkv:
+      path: "model/rwkv-x060-3b-world-v2-28%trained-20231208-ctx4k.pth" #rwkv模型位置"
+      # path: "model/RWKV-code-4-World-7B-20230820-ctx32k.pth" #rwkv模型位置"
+      strategy: "cuda fp16"
+      #   path: "model/rwkv_ggml_q8.bin"           #rwkv模型位置"
+      #   strategy: "Q8_0"       #rwkvcpp:运行方式,设置strategy诸如"Q8_0->16"即可开启,代表运行Q8_0模型在16个cpu核心上
+      #记得去https://github.com/saharNooby/rwkv.cpp/releases下最新版本文件替换librwkv.so或rwkv.dll
+      #rwkv模型参数"
+
+      historymode: state
+      #rwkv历史记录实现方式:state、string。注意,对于torch实现,本参数已弃用,因为已经实现自动切换。
+      state_source_device: cpu
+      #torch实现下,会保存每个会话的state。每个占用2M显存,本参数控制将其复制到内存以节约显存。
+      #置为cuda:0,代表state来自cuda:0,且需要将其复制到内存以节约显存。
+      #置为cuda:1,同理。
+      #置为cpu,代表不需要复制,如果使用cpu计算,或多卡计算需这么设置。
+      presence_penalty: 0.2
+      count_penalty: 0.2
+   glm6b:
+      path: "model\\chatglm3-6b"
+      #glm模型位置"
+      strategy: "cuda fp16"
+      #cuda fp16	 所有glm模型 要直接跑在gpu上都可以使用这个参数
+      #cuda fp16i8	 fp16原生模型 要自行量化为int8跑在gpu上可以使用这个参数
+      #cuda fp16i4	 fp16原生模型 要自行量化为int4跑在gpu上可以使用这个参数
+      #cuda:0 fp16 *14 -> cuda:1	fp16 多卡流水线并行,使用方法参考RWKV的strategy介绍。总层数28
+      # lora: "model/chatglm_fitness_lora"
+      #glm-lora模型位置
+   internlm:
+      path: model\internlm-chat-7b-8k
+      #模型位置"
+   qwen:
+      path: model\Qwen-7B-Chat
+      #模型位置"
+   baichuan:
+      #模型名字中有gptq会进入gptq模式,不加载lora,且会加载basename中的模型
+      #百川
+      # path: "model\\Baichuan-13B-Chat"
+      # user: "<human>" 
+      # answer: "<bot>"
+      # interface: ":"
+      #vicuna
+      path: model\Baichuan2-13B-Chat-4bits
+      user: "Human"        
+      answer: "Assistant" 
+      interface: ": "   
+      # basename: gptq_model-4bit-128g
+      #lora模型位置"
+      # lora: "model/64rank"
+   generic_transformers:
+      #模型名字中有gptq会进入gptq模式,不加载lora,且会加载basename中的模型
+      #百川
+      # path: "model\\Baichuan-13B-Chat"
+      # user: "<human>" 
+      # answer: "<bot>"
+      # interface: ":"
+      #vicuna
+      # path: model\Llama-2-13B-chat-GPTQ
+      # user: "Human"        
+      # answer: "Assistant" 
+      # interface: ": "   
+      # basename: gptq_model-4bit-128g
+      # stopping_text: "\nHuman:"
+      #SUS-Chat-34B-AWQ
+      path: model\SUS-Chat-34B-AWQ
+      user: "### Human: "        
+      answer: "\n### Assistant:\n" 
+      interface: ""   
+      stopping_text: "### Human: "
+      #lora模型位置"
+      # lora: "model/64rank"
+   aquila:
+      path: "AquilaChat-7B"
+      #模型位置"
+      strategy: "cuda fp16"
+   llama:
+      path: "model/stable-vicuna-13B.ggml.q4_2.bin"
+      #llama模型位置
+      #   strategy: "Q8_0"       #cpp:运行方式,设置strategy诸如"Q8_0"即可开启
+      strategy: "cuda fp16i8"
+   moss:
+      path: "model/moss-moon-003-sft-plugin-int4"
+      #模型位置
+      strategy: ""
+      #模型参数 暂时不用"
+   openai:
+      #   api_host: "https://gpt.lucent.blog/v1" #网友的反代
+      #   api_host: "https://api.openai.com/v1" #官方地址
+      api_host: "http://127.0.0.1:8000/v1" #rwkv runner
+      # api_host: "http://127.0.0.1:3000/v1" #rwkv ai00
+
+   replitcode:
+      path: "model/replit-code-v1-3b"
+      #replitcode模型位置
+      #说明:目前模型参数和chat模型差异较大,写死了,不能通过wenda界面配置,需要调整自行到llm_replitcode.py 文件中调整,或放开wenda界面参数
+      #y = model.generate(x, max_length=100, do_sample=true, top_p=0.95, top_k=4, temperature=0.2, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
+      #模型地址:https://huggingface.co/replit/replit-code-v1-3b ,约10g
+      #作用代码补全:问:def fibonacci(n):
+      #答:def fibonacci(n):
+      #if n == 0:
+      #return 0
+      #elif n == 1:
+      #return 1
+      #else:
+      #return fibonacci(n-1) + fibonacci(n-2)
+      #print(fibonacci(10))
+      strategy: "cuda fp16"
+      #因我的3070只有8g内存,所以是先把模型加载到内存进行精度转换,至少要32g系统内存, fp16 占用不到6g显存,fp32 超过8g未测试

BIN
imgs/Word.png


BIN
imgs/auto1.jpg


BIN
imgs/auto2.png


BIN
imgs/auto3.png


BIN
imgs/auto4.png


BIN
imgs/replit-code.png


BIN
imgs/setting.png


BIN
imgs/setting2.png


BIN
imgs/webui.jpg


BIN
imgs/zsk-glm.png


BIN
imgs/zsk-rwkv.png


BIN
imgs/zsk-test.png


BIN
imgs/zsk1.jpg


BIN
imgs/zsk2.png


+ 661 - 0
licence

@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published
+    by the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.

+ 123 - 0
llms/YuanAPI.py

@@ -0,0 +1,123 @@
+import threading
+import datetime
+from bottle import route, response, request, static_file
+import bottle
+
# Flip to True to persist every Q/A pair via defineSQL (imported lazily so
# the dependency is only required when logging is enabled).
# NOTE(review): this name shadows the stdlib `logging` module.
logging = False
if logging:
    from defineSQL import session_maker, 记录
# Serialises access to the single shared model instance across requests.
mutex = threading.Lock()
+
+
@route('/static/:name')
def staticjs(name='-'):
    """Serve a file from the views/static directory.

    Bug fix: the original root "views\\static" used a Windows-only
    backslash separator, which fails on POSIX systems; a forward slash
    works everywhere bottle's static_file does.
    """
    return static_file(name, root="views/static")
+
+
@route('/:name')
def static(name='-'):
    # Serve a top-level asset (html/js/css sibling of index.html) by name.
    views_root = "views"
    return static_file(name, root=views_root)
+
+
@route('/')
def index():
    # Root URL: hand back the single-page UI shell.
    page = "index.html"
    return static_file(page, root="views")
+
+
# Latest-interaction snapshot served by /api/chat_now:
# [user/status, question, answer]. Starts as a "model loading" placeholder.
当前用户 = ['模型加载中', '', '']
+
+
@route('/api/chat_now', method='GET')
def api_chat_now():
    # Plain-text snapshot of the latest interaction (user / question / answer).
    snapshot = 当前用户
    return f"当前用户:{snapshot[0]}\n问题:{snapshot[1]}\n回答:{snapshot[2]}"
+
+
@route('/api/chat_stream', method='POST')
def api_chat_stream():
    """Streamed chat endpoint (POST /api/chat_stream).

    Expects a JSON body {prompt, max_length?, top_p?, temperature?} and
    yields three chunks: a character-count progress marker, the model's
    reply (each terminated by '///'), then the end sentinel "/././".
    """
    data = request.json
    prompt = data.get('prompt')
    # max_length is accepted for API compatibility; answer() hard-caps the
    # input at 768 tokens and the output at 512 new tokens, so it is unused.
    max_length = data.get('max_length')
    if max_length is None:
        max_length = 2048
    top_p = data.get('top_p')
    if top_p is None:
        top_p = 0.7
    temperature = data.get('temperature')
    if temperature is None:
        temperature = 0.9
    # Renamed from `response` so the local no longer shadows bottle.response.
    reply = ''
    # Prefer a reverse proxy's X-Real-IP header over the direct peer address.
    IP = request.environ.get(
        'HTTP_X_REAL_IP') or request.environ.get('REMOTE_ADDR')
    global 当前用户
    with mutex:  # one generation at a time: the model is a shared resource
        yield str(len(prompt))+'字正在计算///'
        try:
            input_text = "用户:" + prompt + "\n小元:"
            # Bug fix: top_p/temperature were parsed but never forwarded.
            reply = answer(input_text, top_p=top_p, temperature=temperature)
        except Exception as e:
            # Best-effort: log and stream an empty reply rather than crash
            # the generator mid-response.
            print("错误", str(e), e)
        # Bug fix: `global 当前用户` was declared but never assigned, so the
        # /api/chat_now status view never updated. Publish the interaction.
        当前用户 = [str(IP), prompt, reply]
        yield reply+'///'
    if logging:
        # Optional persistence, gated by the module-level `logging` flag.
        with session_maker() as session:
            jl = 记录(时间=datetime.datetime.now(), IP=IP, 问=prompt, 答=reply)
            session.add(jl)
            session.commit()
    print(f"\033[1;32m{IP}:\033[1;31m{prompt}\033[1;37m\n{reply}")
    yield "/././"
+
+
# Populated by load_model() on a background thread; requests that arrive
# before loading finishes will fail inside answer() while these are None.
model = None
tokenizer = None
device = None
+
+
def preprocess(text):
    """Escape literal newlines and tabs so the prompt stays on one line."""
    for raw, escaped in (("\n", "\\n"), ("\t", "\\t")):
        text = text.replace(raw, escaped)
    return text
+
+
def postprocess(text):
    """Undo preprocess() escaping and expand '%20' to a double space."""
    replacements = (("\\n", "\n"), ("\\t", "\t"), ("%20", "  "))
    for old, new in replacements:
        text = text.replace(old, new)
    return text
+
+
def answer(text, sample=True, top_p=1, temperature=0.7):
    """Generate a reply with the shared ChatYuan T5 model.

    text: raw prompt; escaped by preprocess() before tokenisation.
    sample: True -> nucleus sampling; False -> deterministic decoding.
    top_p: nucleus-sampling cutoff in (0, 1]; higher -> more diverse output.
    temperature: softmax temperature used when sampling.
    Input is truncated to 768 tokens; at most 512 new tokens are generated.
    Relies on the module globals model/tokenizer/device set by load_model().
    """
    text = preprocess(text)
    encoding = tokenizer(text=[text], truncation=True, padding=True,
                         max_length=768, return_tensors="pt").to(device)
    if not sample:
        # Deterministic path: single beam with a length penalty.
        out = model.generate(**encoding, return_dict_in_generate=True,
                             output_scores=False, max_new_tokens=512, num_beams=1, length_penalty=0.6)
    else:
        # Sampling path; no_repeat_ngram_size curbs long verbatim repetition.
        out = model.generate(**encoding, return_dict_in_generate=True, output_scores=False, max_new_tokens=512,
                             do_sample=True, top_p=top_p, temperature=temperature, no_repeat_ngram_size=12)
    out_text = tokenizer.batch_decode(
        out["sequences"], skip_special_tokens=True)
    return postprocess(out_text[0])
+
+
def load_model():
    """Load the ChatYuan-large-v2 T5 model onto the GPU (background thread).

    Holds `mutex` for the whole load so chat requests block until the model
    is ready instead of calling into a half-initialised model.
    """
    global model, tokenizer, device
    # Bug fix: the original bare acquire()/release() pair never released the
    # lock if loading raised, deadlocking every subsequent chat request.
    # `with mutex:` guarantees release on any exit path.
    with mutex:
        from transformers import T5Tokenizer, T5ForConditionalGeneration
        tokenizer = T5Tokenizer.from_pretrained(
            "ChatYuan-large-v2", local_files_only=True)
        # half(): fp16 weights to roughly halve GPU memory use.
        model = T5ForConditionalGeneration.from_pretrained(
            "ChatYuan-large-v2", local_files_only=True).half()
        import torch
        device = torch.device('cuda')
        model.to(device)
    print("模型加载完成")
+
+
# Load the model in the background so the HTTP server starts immediately;
# requests that arrive first block on `mutex` until loading completes.
thread_load_model = threading.Thread(target=load_model)
thread_load_model.start()


# bottle.debug(True)
# Blocking call: serve the API with the paste server on port 17860.
bottle.run(server='paste', port=17860, quiet=True)

+ 674 - 0
llms/gpt4free/LICENSE

@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.

+ 359 - 0
llms/gpt4free/README.md

@@ -0,0 +1,359 @@
+# Free LLM APIs
+
+This repository provides reverse-engineered language models from various sources. Some of these models are already available in the repo, while others are currently being worked on.
+
+> **Important:** If you come across any website offering free language models, please create an issue or submit a pull request with the details. We will reverse engineer it and add it to this repository.
+
+## Best Chatgpt site
+> https://chat.chatbot.sex/chat
+> This site was developed by me and includes **gpt-4**, **internet access** and **GPT jailbreaks** like DAN
+> You can find an opensource version of it to run locally here: https://github.com/xtekky/chatgpt-clone
+
+## To-Do List
+
+- [x] implement poe.com create bot feature | AVAILABLE NOW
+- [x] renaming the 'poe' module to 'quora'
+- [x] add you.com api
+
+
+## Table of Contents
+
+- [Current Sites (No Authentication / Easy Account Creation)](#current-sites)
+- [Sites with Authentication (Will Reverse Engineer but Need Account Access)](#sites-with-authentication)
+- [Usage Examples](#usage-examples)
+  - [`quora (poe)`](#example-poe)
+  - [`phind`](#example-phind)
+  - [`t3nsor`](#example-t3nsor)
+  - [`ora`](#example-ora)
+  - [`writesonic`](#example-writesonic)
+  - [`you`](#example-you)
+
+## Current Sites <a name="current-sites"></a>
+
+| Website                    | Model(s)             |
+| -------------------------- | -------------------- |
+| [ora.sh](https://ora.sh)   | GPT-3.5 / 4              |
+| [poe.com](https://poe.com) | GPT-4/3.5            |
+| [writesonic.com](https://writesonic.com)|GPT-3.5 / Internet|
+| [t3nsor.com](https://t3nsor.com)|GPT-3.5|
+| [you.com](https://you.com)|GPT-3.5 / Internet / good search|
+| [phind.com](https://phind.com)|GPT-4 / Internet / good search|
+
+## Sites with Authentication <a name="sites-with-authentication"></a>
+
+These sites will be reverse engineered but need account access:
+
+* [chat.openai.com/chat](https://chat.openai.com/chat)
+* [bard.google.com](https://bard.google.com)
+* [bing.com/chat](https://bing.com/chat)
+
+## Usage Examples <a name="usage-examples"></a>
+
+### Example: `quora (poe)` (use like openai pypi package) - GPT-4 <a name="example-poe"></a>
+
+```python
+# quora model names: (use left key as argument)
+models = {
+    'sage'   : 'capybara',
+    'gpt-4'  : 'beaver',
+    'claude-v1.2'         : 'a2_2',
+    'claude-instant-v1.0' : 'a2',
+    'gpt-3.5-turbo'       : 'chinchilla'
+}
+```
+
+#### !! new: bot creation
+
+```python
+# import quora (poe) package
+import quora
+
+# create account
+# make sure to set enable_bot_creation to True
+token = quora.Account.create(logging = True, enable_bot_creation=True)
+
+model = quora.Model.create(
+    token = token,
+    model = 'gpt-3.5-turbo', # or claude-instant-v1.0
+    system_prompt = 'you are ChatGPT a large language model ...' 
+)
+
+print(model.name) # gptx....
+
+# streaming response
+for response in quora.StreamingCompletion.create(
+    custom_model = model.name,
+    prompt       ='hello world',
+    token        = token):
+    
+    print(response.completion.choices[0].text)
+```
+
+#### Normal Response:
+```python
+
+response = quora.Completion.create(model  = 'gpt-4',
+    prompt = 'hello world',
+    token  = token)
+
+print(response.completion.choices[0].text)    
+```     
+
+### Example: `phind` (use like openai pypi package) <a name="example-phind"></a>
+
+```python
+import phind
+
+prompt = 'who won the quatar world cup'
+
+# help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
+# stream completion
+for result in phind.StreamingCompletion.create(
+    model  = 'gpt-4',
+    prompt = prompt,
+    results     = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
+    creative    = False,
+    detailed    = False,
+    codeContext = ''):  # up to 3000 chars of code
+
+    print(result.completion.choices[0].text, end='', flush=True)
+
+# normal completion
+result = phind.Completion.create(
+    model  = 'gpt-4',
+    prompt = prompt,
+    results     = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
+    creative    = False,
+    detailed    = False,
+    codeContext = '') # up to 3000 chars of code
+
+print(result.completion.choices[0].text)
+```
+
+### Example: `t3nsor` (use like openai pypi package) <a name="example-t3nsor"></a>
+
+```python
+# Import t3nsor
+import t3nsor
+
+# t3nsor.Completion.create
+# t3nsor.StreamCompletion.create
+
+[...]
+
+```
+
+#### Example Chatbot
+```python
+messages = []
+
+while True:
+    user = input('you: ')
+
+    t3nsor_cmpl = t3nsor.Completion.create(
+        prompt   = user,
+        messages = messages
+    )
+
+    print('gpt:', t3nsor_cmpl.completion.choices[0].text)
+    
+    messages.extend([
+        {'role': 'user', 'content': user }, 
+        {'role': 'assistant', 'content': t3nsor_cmpl.completion.choices[0].text}
+    ])
+```
+
+#### Streaming Response:
+
+```python
+for response in t3nsor.StreamCompletion.create(
+    prompt   = 'write python code to reverse a string',
+    messages = []):
+
+    print(response.completion.choices[0].text)
+```
+
+### Example: `ora` (use like openai pypi package) <a name="example-ora"></a>
+
+### load model (new)
+
+more gpt4 models in `/testing/ora_gpt4.py`
+
+```python
+# normal gpt-4: b8b12eaa-5d47-44d3-92a6-4d706f2bcacf
+model = ora.CompletionModel.load(chatbot_id, 'gpt-4') # or gpt-3.5
+```
+
+#### create model / chatbot: 
+```python
+# import ora
+import ora
+
+# create model
+model = ora.CompletionModel.create(
+    system_prompt = 'You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible',
+    description   = 'ChatGPT Openai Language Model',
+    name          = 'gpt-3.5')
+
+# init conversation (will give you a conversationId)
+init = ora.Completion.create(
+    model  = model,
+    prompt = 'hello world')
+
+print(init.completion.choices[0].text)
+
+while True:
+    # pass in conversationId to continue conversation
+    
+    prompt = input('>>> ')
+    response = ora.Completion.create(
+        model  = model,
+        prompt = prompt,
+        includeHistory = True, # remember history
+        conversationId = init.id)
+    
+    print(response.completion.choices[0].text)
+```
+
+### Example: `writesonic` (use like openai pypi package) <a name="example-writesonic"></a>
+
+```python
+# import writesonic
+import writesonic
+
+# create account (3-4s)
+account = writesonic.Account.create(logging = True)
+
+# with logging:
+    # 2023-04-06 21:50:25 INFO __main__ -> register success : '{"id":"51aa0809-3053-44f7-922a...' (2s)
+    # 2023-04-06 21:50:25 INFO __main__ -> id : '51aa0809-3053-44f7-922a-2b85d8d07edf'
+    # 2023-04-06 21:50:25 INFO __main__ -> token : 'eyJhbGciOiJIUzI1NiIsInR5cCI6Ik...'
+    # 2023-04-06 21:50:28 INFO __main__ -> got key : '194158c4-d249-4be0-82c6-5049e869533c' (2s)
+
+# simple completion
+response = writesonic.Completion.create(
+    api_key = account.key,
+    prompt  = 'hello world'
+)
+
+print(response.completion.choices[0].text) # Hello! How may I assist you today?
+
+# conversation
+
+response = writesonic.Completion.create(
+    api_key = account.key,
+    prompt  = 'what is my name ?',
+    enable_memory = True,
+    history_data  = [
+        {
+            'is_sent': True,
+            'message': 'my name is Tekky'
+        },
+        {
+            'is_sent': False,
+            'message': 'hello Tekky'
+        }
+    ]
+)
+
+print(response.completion.choices[0].text) # Your name is Tekky.
+
+# enable internet
+
+response = writesonic.Completion.create(
+    api_key = account.key,
+    prompt  = 'who won the quatar world cup ?',
+    enable_google_results = True
+)
+
+print(response.completion.choices[0].text) # Argentina won the 2022 FIFA World Cup tournament held in Qatar ...
+```
+
+### Example: `you` (use like openai pypi package) <a name="example-you"></a>
+
+```python
+import you
+
+# simple request with links and details
+response = you.Completion.create(
+    prompt       = "hello world",
+    detailed     = True,
+    includelinks = True,)
+
+print(response)
+
+# {
+#     "response": "...",
+#     "links": [...],
+#     "extra": {
+#         "slots": {...}
+#     }
+# }
+
+#chatbot
+
+chat = []
+
+while True:
+    prompt = input("You: ")
+    
+    response = you.Completion.create(
+        prompt  = prompt,
+        chat    = chat)
+    
+    print("Bot:", response["response"])
+    
+    chat.append({"question": prompt, "answer": response["response"]})
+```
+
+## Dependencies
+
+The repository is written in Python and requires the following packages:
+
+* websocket-client
+* requests
+* tls-client
+
+You can install these packages using the provided `requirements.txt` file.
+
+## Repository structure:
+    .
+    ├── ora/
+    ├── quora/ (/poe)
+    ├── t3nsor/
+    ├── testing/
+    ├── writesonic/
+    ├── you/
+    ├── README.md  <-- this file.
+    └── requirements.txt
+
+
+## Star History
+
+[![Star History Chart](https://api.star-history.com/svg?repos=xtekky/openai-gpt4&type=Timeline)](https://star-history.com/#xtekky/openai-gpt4&Timeline)
+
+
+## Copyright: 
+This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt)     
+
+Most code, with the exception of `quora/api.py` (by [ading2210](https://github.com/ading2210)), has been written by me, [xtekky](https://github.com/xtekky).
+
+### Copyright Notice:
+```
+xtekky/openai-gpt4: multiple reverse engineered language-model api's to decentralise the ai industry.  
+Copyright (C) 2023 xtekky
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <https://www.gnu.org/licenses/>.
+```
+

+ 49 - 0
llms/gpt4free/ora/__init__.py

@@ -0,0 +1,49 @@
+from ora.model  import CompletionModel
+from ora.typing import OraResponse
+from requests   import post
+from time       import time
+from random     import randint
+
class Completion:
    """Wrapper around ora.sh's conversation endpoint, presented in an
    OpenAI-completion-like shape."""

    @staticmethod
    def create(
        model : CompletionModel,
        prompt: str,
        includeHistory: bool = True,
        conversationId: str = None) -> OraResponse:
        """Send `prompt` to the ora.sh chatbot described by `model`.

        Parameters:
            model          : chatbot descriptor from CompletionModel.create/load.
            conversationId : resume an existing conversation when given,
                             otherwise the server opens a new one.
            includeHistory : ask the server to keep prior turns in context.

        Returns:
            OraResponse mimicking OpenAI's text-completion payload.

        Raises:
            RuntimeError: when the server reply lacks a 'response' field
            (previously this surfaced as a bare KeyError).
        """
        # Only send a conversationId key when resuming a conversation.
        extra = {
            'conversationId': conversationId} if conversationId else {}

        response = post('https://ora.sh/api/conversation',
            headers = {
                "host"          : "ora.sh",
                # A randomly suffixed bearer token is sent; presumably the server
                # only checks its shape -- TODO(review) confirm this still works.
                "authorization" : f"Bearer AY0{randint(1111, 9999)}",
                "user-agent"    : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
                "origin"        : "https://ora.sh",
                "referer"       : "https://ora.sh/chat/",
            },
            json = extra | {
                'chatbotId': model.id,
                'input'    : prompt,
                'userId'   : model.createdBy,
                'model'    : model.modelName,
                'provider' : 'OPEN_AI',
                'includeHistory': includeHistory}).json()

        if 'response' not in response:
            # Surface server-side failures with context instead of a KeyError.
            raise RuntimeError(f'ora.sh returned an unexpected payload: {response}')

        text = response['response']

        return OraResponse({
            'id'     : response['conversationId'],
            'object' : 'text_completion',
            'created': int(time()),
            'model'  : model.slug,
            'choices': [{
                    'text'          : text,
                    'index'         : 0,
                    'logprobs'      : None,
                    'finish_reason' : 'stop'
            }],
            # NOTE(review): these are character counts, not real token counts.
            'usage': {
                'prompt_tokens'     : len(prompt),
                'completion_tokens' : len(text),
                'total_tokens'      : len(prompt) + len(text)
            }
        })

+ 55 - 0
llms/gpt4free/ora/model.py

@@ -0,0 +1,55 @@
+from uuid     import uuid4
+from requests import post
+
class CompletionModel:
    """Class-level registry describing the currently active ora.sh chatbot.

    All state lives on the class itself rather than on instances, so this
    module effectively tracks a single active model at a time.
    """
    system_prompt = None            # system prompt the bot was created with
    description   = None            # human-readable description of the bot
    createdBy     = None            # user id the server associates with the bot
    createdAt     = None            # creation timestamp returned by the server
    slug          = None            # friendly display name used in responses
    id            = None            # chatbot id used by the conversation endpoint
    modelName     = None            # provider-side model identifier
    model         = 'gpt-3.5-turbo' # default underlying model

    def create(
        system_prompt: str = 'You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible',
        description  : str = 'ChatGPT Openai Language Model',
        name         : str = 'gpt-3.5'):
        """Register a new assistant on ora.sh and cache its metadata.

        Returns the CompletionModel class itself so it can be handed
        straight to Completion.create.
        """
        CompletionModel.system_prompt = system_prompt
        CompletionModel.description   = description
        CompletionModel.slug          = name

        headers = {
            'Origin'    : 'https://ora.sh',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
            'Referer'   : 'https://ora.sh/',
            'Host'      : 'ora.sh',
        }

        response = post('https://ora.sh/api/assistant', headers = headers, json = {
            'prompt'     : system_prompt,
            'userId'     : f'auto:{uuid4()}',
            'name'       : name,
            'description': description})

        # Parse the body once instead of calling .json() per field.
        # (The original also leaked a debug print of the full payload here.)
        data = response.json()

        CompletionModel.id        = data['id']
        CompletionModel.createdBy = data['createdBy']
        CompletionModel.createdAt = data['createdAt']

        return CompletionModel

    def load(chatbotId: str, modelName: str = 'gpt-3.5-turbo', userId: str = None):
        """Point the registry at an existing chatbot without creating one.

        A random userId is generated when none is supplied. Performs no
        network I/O.
        """
        if userId is None:
            userId = f'{uuid4()}'

        CompletionModel.system_prompt = None
        CompletionModel.description   = None
        CompletionModel.slug          = None
        CompletionModel.id            = chatbotId
        CompletionModel.createdBy     = userId
        CompletionModel.createdAt     = None
        CompletionModel.modelName     = modelName

        return CompletionModel

+ 39 - 0
llms/gpt4free/ora/typing.py

@@ -0,0 +1,39 @@
class OraResponse:
    """OpenAI-completion-shaped view over the raw ora.sh response dict."""

    class Completion:
        """Container for the list of returned choices."""

        class Choices:
            """A single completion choice with its text and metadata."""

            def __init__(self, choice: dict) -> None:
                self.text          = choice['text']
                self.content       = self.text.encode()
                self.index         = choice['index']
                self.logprobs      = choice['logprobs']
                self.finish_reason = choice['finish_reason']

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n    text           = {self.text.encode()},\n    index          = {self.index},\n    logprobs       = {self.logprobs},\n    finish_reason  = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            parsed = []
            for entry in choices:
                parsed.append(self.Choices(entry))
            self.choices = parsed

    class Usage:
        """Token accounting reported alongside the completion."""

        def __init__(self, usage_dict: dict) -> None:
            self.prompt_tokens     = usage_dict['prompt_tokens']
            self.completion_tokens = usage_dict['completion_tokens']
            self.total_tokens      = usage_dict['total_tokens']

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n    prompt_tokens      = {self.prompt_tokens},\n    completion_tokens  = {self.completion_tokens},\n    total_tokens       = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        data = response_dict
        self.response_dict = data
        self.id            = data['id']
        self.object        = data['object']
        self.created       = data['created']
        self.model         = data['model']
        self.completion    = self.Completion(data['choices'])
        self.usage         = self.Usage(data['usage'])

    def json(self) -> dict:
        """Return the untouched response dictionary."""
        return self.response_dict

+ 241 - 0
llms/gpt4free/phind/__init__.py

@@ -0,0 +1,241 @@
+from urllib.parse import quote
+from time         import time
+from datetime     import datetime
+from queue        import Queue, Empty
+from threading    import Thread
+from re           import findall
+
+from curl_cffi.requests import post
+
class PhindResponse:
    """OpenAI-completion-shaped wrapper around a phind answer payload."""

    class Completion:
        """Container for the list of returned choices."""

        class Choices:
            """A single completion choice with its text and metadata."""

            def __init__(self, choice: dict) -> None:
                # Copy the standard fields straight off the choice dict.
                for field in ('text', 'index', 'logprobs', 'finish_reason'):
                    setattr(self, field, choice[field])
                self.content = self.text.encode()

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n    text           = {self.text.encode()},\n    index          = {self.index},\n    logprobs       = {self.logprobs},\n    finish_reason  = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            self.choices = list(map(self.Choices, choices))

    class Usage:
        """Token accounting reported alongside the completion."""

        def __init__(self, usage_dict: dict) -> None:
            self.prompt_tokens     = usage_dict['prompt_tokens']
            self.completion_tokens = usage_dict['completion_tokens']
            self.total_tokens      = usage_dict['total_tokens']

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n    prompt_tokens      = {self.prompt_tokens},\n    completion_tokens  = {self.completion_tokens},\n    total_tokens       = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        self.response_dict = response_dict
        self.id            = response_dict['id']
        self.object        = response_dict['object']
        self.created       = response_dict['created']
        self.model         = response_dict['model']
        self.completion    = self.Completion(response_dict['choices'])
        self.usage         = self.Usage(response_dict['usage'])

    def json(self) -> dict:
        """Return the untouched response dictionary."""
        return self.response_dict
+
+
class Search:
    """Thin client for phind's Bing-proxy search endpoint."""

    def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict:
        """Run a web search for `prompt` via phind's Bing proxy.

        When `actualSearch` is False, no network request is made and an empty
        SearchResponse-shaped stub is returned instead.
        """
        if not actualSearch:
            empty_result = {
                '_type': 'SearchResponse',
                'queryContext': {
                    'originalQuery': prompt
                },
                'webPages': {
                    'webSearchUrl': f'https://www.bing.com/search?q={quote(prompt)}',
                    'totalEstimatedMatches': 0,
                    'value': []
                },
                'rankingResponse': {
                    'mainline': {
                        'items': []
                    }
                }
            }
            return empty_result

        request_headers = {
            'authority'    : 'www.phind.com',
            'origin'       : 'https://www.phind.com',
            'referer'      : 'https://www.phind.com/search',
            'user-agent'   : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
        }

        payload = {
            'q': prompt,
            'userRankList': {},
            'browserLanguage': language}

        reply = post('https://www.phind.com/api/bing/search',
                     headers = request_headers, json = payload)
        return reply.json()['rawBingResults']
+
+
class Completion:
    def create(
        model = 'gpt-4', 
        prompt: str = '', 
        results: dict = None, 
        creative: bool = False, 
        detailed: bool = False, 
        codeContext: str = '',
        language: str = 'en') -> PhindResponse:
        """Ask phind's answer endpoint and wrap the reply as a PhindResponse.

        When `results` is None a live Bing search is performed first so the
        model has web context to draw from. `codeContext` is limited to 2999
        characters by the upstream API.
        """
        if results is None:
            results = Search.create(prompt, actualSearch = True)

        if len(codeContext) > 2999:
            raise ValueError('codeContext must be less than 3000 characters')

        # phind calls its quality tiers "skills"; map friendly names onto them.
        skill_map = {
            'gpt-4' : 'expert',
            'gpt-3.5-turbo' : 'intermediate',
            'gpt-3.5': 'intermediate',
        }
        skill = skill_map[model]

        json_data = {
            'question'    : prompt,
            'bingResults' : results,
            'codeContext' : codeContext,
            'options': {
                'skill'   : skill,
                'date'    : datetime.now().strftime("%d/%m/%Y"),
                'language': language,
                'detailed': detailed,
                'creative': creative
            }
        }

        request_headers = {
            'authority'    : 'www.phind.com',
            'origin'       : 'https://www.phind.com',
            'referer'      : f'https://www.phind.com/search?q={quote(prompt)}&c=&source=searchbox&init=true',
            'user-agent'   : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
        }

        response = post('https://www.phind.com/api/infer/answer',
                        headers = request_headers, json = json_data, timeout = 99999)

        # The endpoint streams SSE-style "data: ..." frames; strip the framing
        # and glue the pieces back together into one answer string.
        answer = ''.join(frame.replace('data: ', '')
                         for frame in response.text.split('\r\n\r\n'))

        return PhindResponse({
            'id'     : f'cmpl-1337-{int(time())}',
            'object' : 'text_completion',
            'created': int(time()),
            'model'  : skill,
            'choices': [{
                    'text'          : answer,
                    'index'         : 0,
                    'logprobs'      : None,
                    'finish_reason' : 'stop'
            }],
            # NOTE(review): character counts stand in for token counts here.
            'usage': {
                'prompt_tokens'     : len(prompt),
                'completion_tokens' : len(answer),
                'total_tokens'      : len(prompt) + len(answer)
            }
        })
+        
+
class StreamingCompletion:
    """Streaming variant of Completion: yields a PhindResponse per chunk.

    NOTE(review): state is class-level, so only one stream can safely run
    per process at a time.
    """
    message_queue    = Queue()
    stream_completed = False

    def request(model, prompt, results, creative, detailed, codeContext, language) -> None:
        """Worker thread: fire the request; chunks arrive via the callback."""
        models = {
            'gpt-4' : 'expert',
            'gpt-3.5-turbo' : 'intermediate',
            'gpt-3.5': 'intermediate',
        }

        json_data = {
            'question'    : prompt,
            'bingResults' : results,
            'codeContext' : codeContext,
            'options': {
                'skill'   : models[model],
                'date'    : datetime.now().strftime("%d/%m/%Y"),
                'language': language,
                'detailed': detailed,
                'creative': creative
            }
        }

        post('https://www.phind.com/api/infer/answer', json=json_data, timeout=99999,
            content_callback = StreamingCompletion.handle_stream_response,
            headers = {
                'authority'    : 'www.phind.com',
                'origin'       : 'https://www.phind.com',
                'referer'      : f'https://www.phind.com/search?q={quote(prompt)}&c=&source=searchbox&init=true',
                'user-agent'   : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
        })

        StreamingCompletion.stream_completed = True

    @staticmethod
    def create(
        model       : str = 'gpt-4', 
        prompt      : str = '', 
        results     : dict = None, 
        creative    : bool = False, 
        detailed    : bool = False, 
        codeContext : str = '',
        language    : str = 'en'):
        """Generator yielding PhindResponse objects as the answer streams in.

        Raises:
            ValueError: when codeContext exceeds 2999 characters.
        """
        if results is None:
            results = Search.create(prompt, actualSearch = True)

        if len(codeContext) > 2999:
            raise ValueError('codeContext must be less than 3000 characters')

        # Reset class-level state before starting the worker: without this,
        # a second call would see the previous stream's completed flag (and
        # possibly stale queued chunks) and terminate immediately.
        StreamingCompletion.stream_completed = False
        StreamingCompletion.message_queue    = Queue()

        Thread(target = StreamingCompletion.request, args = [
            model, prompt, results, creative, detailed, codeContext, language]).start()

        while not StreamingCompletion.stream_completed or not StreamingCompletion.message_queue.empty():
            try:
                # Small timeout instead of 0 avoids a hot busy-wait loop.
                chunk = StreamingCompletion.message_queue.get(timeout=0.01)

                if chunk == b'data:  \r\ndata: \r\ndata: \r\n\r\n':
                    chunk = b'data:  \n\n\r\n\r\n'

                chunk = chunk.decode()

                # Undo the SSE "data: " framing while preserving embedded newlines.
                chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
                chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\n\r\n\r\n')
                chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')

                yield PhindResponse({
                    'id'     : f'cmpl-1337-{int(time())}', 
                    'object' : 'text_completion', 
                    'created': int(time()), 
                    'model'  : model, 
                    'choices': [{
                            'text'          : chunk, 
                            'index'         : 0, 
                            'logprobs'      : None, 
                            'finish_reason' : 'stop'
                    }], 
                    'usage': {
                        'prompt_tokens'     : len(prompt), 
                        'completion_tokens' : len(chunk), 
                        'total_tokens'      : len(prompt) + len(chunk)
                    }
                })

            except Empty:
                # No chunk ready yet; loop and re-check the completion flag.
                pass

    @staticmethod
    def handle_stream_response(response):
        """curl_cffi content callback: enqueue each raw byte chunk."""
        StreamingCompletion.message_queue.put(response)

+ 350 - 0
llms/gpt4free/quora/__init__.py

@@ -0,0 +1,350 @@
+from quora.api    import Client as PoeClient
+from quora.mail   import Mail
+from requests     import Session
+from re           import search, findall
+from json         import loads
+from time         import sleep
+from pathlib      import Path
+from random       import choice, choices, randint
+from string       import ascii_letters, digits
+from urllib       import parse
+from os           import urandom
+from hashlib      import md5
+from json         import dumps
+from pypasser     import reCaptchaV3
+
def extract_formkey(html):
    """Recover the poe.com "formkey" anti-bot token from page HTML.

    The page embeds an obfuscated script containing a hex key string plus a
    series of `dst[i] = src[j]` assignments that scatter the key's characters.
    Replaying those assignments reconstructs the formkey.
    """
    script_text  = search(r'<script>if\(.+\)throw new Error;(.+)</script>', html).group(1)
    key_text     = search(r'var .="([0-9a-f]+)",', script_text).group(1)
    cipher_pairs = findall(r'.\[(\d+)\]=.\[(\d+)\]', script_text)

    # Place each source character at its destination slot, then join.
    pieces = [''] * len(cipher_pairs)
    for dst, src in cipher_pairs:
        pieces[int(dst)] = key_text[int(src)]

    return ''.join(pieces)
+
class PoeResponse:
    """OpenAI-completion-shaped wrapper around a poe.com reply."""

    class Completion:
        """Container for the list of returned choices."""

        class Choices:
            """A single completion choice with its text and metadata."""

            def __init__(self, choice: dict) -> None:
                text               = choice['text']
                self.text          = text
                self.content       = text.encode()
                self.index         = choice['index']
                self.logprobs      = choice['logprobs']
                self.finish_reason = choice['finish_reason']

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n    text           = {self.text.encode()},\n    index          = {self.index},\n    logprobs       = {self.logprobs},\n    finish_reason  = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            self.choices = [self.Choices(item) for item in choices]

    class Usage:
        """Token accounting reported alongside the completion."""

        def __init__(self, usage_dict: dict) -> None:
            self.prompt_tokens     = usage_dict['prompt_tokens']
            self.completion_tokens = usage_dict['completion_tokens']
            self.total_tokens      = usage_dict['total_tokens']

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n    prompt_tokens      = {self.prompt_tokens},\n    completion_tokens  = {self.completion_tokens},\n    total_tokens       = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        self.response_dict = response_dict
        self.id            = response_dict['id']
        self.object        = response_dict['object']
        self.created       = response_dict['created']
        self.model         = response_dict['model']
        self.completion    = self.Completion(response_dict['choices'])
        self.usage         = self.Usage(response_dict['usage'])

    def json(self) -> dict:
        """Return the untouched response dictionary."""
        return self.response_dict
+
+
class ModelResponse:
    """Summary of a freshly created poe bot, parsed from the GraphQL reply."""

    def __init__(self, json_response: dict) -> None:
        bot = json_response['data']['poeBotCreate']['bot']
        self.id      = bot['id']
        self.name    = bot['displayName']
        self.limit   = bot['messageLimit']['dailyLimit']
        self.deleted = bot['deletionState']
+
class Model:
    # Creates a custom poe.com bot on an existing account and returns its
    # metadata wrapped in a ModelResponse. Requires bot creation to be
    # enabled on the account (see Account.create's enable_bot_creation).
    def create(
        token: str,
        model: str = 'gpt-3.5-turbo', # claude-instant
        system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
        description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
        handle: str = None) -> ModelResponse:
        # token  : poe.com `p-b` auth cookie identifying the account.
        # model  : friendly model name, mapped to poe's internal slug below.
        # handle : public bot handle; a random gptx####### one is generated
        #          when omitted.

        # Friendly name -> poe internal model slug.
        models = {
            'gpt-3.5-turbo' : 'chinchilla',
            'claude-instant-v1.0': 'a2',
            'gpt-4': 'beaver'
        }
        
        if not handle:
            handle = f'gptx{randint(1111111, 9999999)}'
        
        client = Session()
        client.cookies['p-b'] = token
        
        # formkey + tchannel are anti-bot values poe requires on every GraphQL call.
        formkey  = extract_formkey(client.get('https://poe.com').text)
        settings = client.get('https://poe.com/api/settings').json()

        # NOTE(review): "content-type" appears twice in this literal; the
        # duplicate is harmless (the later entry wins) but redundant.
        client.headers = {
            "host"              : "poe.com",
            "origin"            : "https://poe.com",
            "referer"           : "https://poe.com/",
            "content-type"      : "application/json",
            "poe-formkey"       : formkey,
            "poe-tchannel"      : settings['tchannelData']['channel'],
            "user-agent"        : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
            "connection"        : "keep-alive",
            "sec-ch-ua"         : "\"Chromium\";v=\"112\", \"Google Chrome\";v=\"112\", \"Not:A-Brand\";v=\"99\"",
            "sec-ch-ua-mobile"  : "?0",
            "sec-ch-ua-platform": "\"macOS\"",
            "content-type"      : "application/json",
            "sec-fetch-site"    : "same-origin",
            "sec-fetch-mode"    : "cors",
            "sec-fetch-dest"    : "empty",
            "accept"            : "*/*",
            "accept-encoding"   : "gzip, deflate, br",
            "accept-language"   : "en-GB,en-US;q=0.9,en;q=0.8",
        }
        
        # Compact separators matter: the payload bytes are hashed below, and
        # the signature must match what poe's frontend would produce.
        payload = dumps(separators=(',', ':'), obj = {
            'queryName': 'CreateBotMain_poeBotCreate_Mutation',
            'variables': {
                'model'                 : models[model],
                'handle'                : handle,
                'prompt'                : system_prompt,
                'isPromptPublic'        : True,
                'introduction'          : '',
                'description'           : description,
                'profilePictureUrl'     : 'https://qph.fs.quoracdn.net/main-qimg-24e0b480dcd946e1cc6728802c5128b6',
                'apiUrl'                : None,
                'apiKey'                : ''.join(choices(ascii_letters + digits, k = 32)),
                'isApiBot'              : False,
                'hasLinkification'      : False,
                'hasMarkdownRendering'  : False,
                'hasSuggestedReplies'   : False,
                'isPrivateBot'          : False
            },
            'query': 'mutation CreateBotMain_poeBotCreate_Mutation(\n  $model: String!\n  $handle: String!\n  $prompt: String!\n  $isPromptPublic: Boolean!\n  $introduction: String!\n  $description: String!\n  $profilePictureUrl: String\n  $apiUrl: String\n  $apiKey: String\n  $isApiBot: Boolean\n  $hasLinkification: Boolean\n  $hasMarkdownRendering: Boolean\n  $hasSuggestedReplies: Boolean\n  $isPrivateBot: Boolean\n) {\n  poeBotCreate(model: $model, handle: $handle, promptPlaintext: $prompt, isPromptPublic: $isPromptPublic, introduction: $introduction, description: $description, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, isApiBot: $isApiBot, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {\n    status\n    bot {\n      id\n      ...BotHeader_bot\n    }\n  }\n}\n\nfragment BotHeader_bot on Bot {\n  displayName\n  messageLimit {\n    dailyLimit\n  }\n  ...BotImage_bot\n  ...BotLink_bot\n  ...IdAnnotation_node\n  ...botHelpers_useViewerCanAccessPrivateBot\n  ...botHelpers_useDeletion_bot\n}\n\nfragment BotImage_bot on Bot {\n  displayName\n  ...botHelpers_useDeletion_bot\n  ...BotImage_useProfileImage_bot\n}\n\nfragment BotImage_useProfileImage_bot on Bot {\n  image {\n    __typename\n    ... on LocalBotImage {\n      localName\n    }\n    ... on UrlBotImage {\n      url\n    }\n  }\n  ...botHelpers_useDeletion_bot\n}\n\nfragment BotLink_bot on Bot {\n  displayName\n}\n\nfragment IdAnnotation_node on Node {\n  __isNode: __typename\n  id\n}\n\nfragment botHelpers_useDeletion_bot on Bot {\n  deletionState\n}\n\nfragment botHelpers_useViewerCanAccessPrivateBot on Bot {\n  isPrivateBot\n  viewerIsCreator\n}\n',
        })

        # poe signs each request as md5(payload + formkey + fixed salt).
        base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
        client.headers["poe-tag-id"] =  md5(base_string.encode()).hexdigest()
        
        response = client.post("https://poe.com/api/gql_POST", data = payload)

        if not 'success' in response.text:
            raise Exception('''
                Bot creation Failed
                !! Important !!
                Bot creation was not enabled on this account
                please use: quora.Account.create with enable_bot_creation set to True
            ''')
        
        return ModelResponse(response.json())
+
class Account:
    def create(proxy: str = None, logging: bool = False, enable_bot_creation: bool = False):
        """Register a throwaway poe.com account via a disposable e-mail inbox.

        Parameters:
            proxy               : optional "host:port" routed over http for all requests.
            logging             : print progress details to stdout.
            enable_bot_creation : also complete the multiplayer-NUX mutation so
                                  the new account may create bots.

        Returns:
            The `p-b` session token; it is also appended to cookies.txt.

        Raises:
            Exception: when the disposable mailbox could not be created.
            RuntimeError: when poe rejects the verification-code request
            (previously this called quit(), killing the host process).
        """
        client       = Session()
        client.proxies = {
            'http': f'http://{proxy}',
            'https': f'http://{proxy}'} if proxy else None

        # Disposable inbox used to receive the signup verification code.
        mail         = Mail(client.proxies)
        mail_token   = None
        _, mail_address = mail.get_mail()
        if mail_address is None:
            raise Exception('Error creating mail, please use proxies')

        if logging: print('email', mail_address)

        client.headers = {
            "host"              : "poe.com",
            "connection"        : "keep-alive",
            "cache-control"     : "max-age=0",
            "sec-ch-ua"         : "\"Microsoft Edge\";v=\"111\", \"Not(A:Brand\";v=\"8\", \"Chromium\";v=\"111\"",
            "sec-ch-ua-mobile"  : "?0",
            "sec-ch-ua-platform": "\"macOS\"",
            "user-agent"        : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.54",
            "accept"            : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "sec-fetch-site"    : "same-origin",
            "sec-fetch-mode"    : "navigate",
            "content-type"      : "application/json",
            "sec-fetch-user"    : "?1",
            "sec-fetch-dest"    : "document",
            "accept-encoding"   : "gzip, deflate, br",
            "accept-language"   : "en-GB,en;q=0.9,en-US;q=0.8",
            "upgrade-insecure-requests": "1",
        }

        # formkey + tchannel are anti-bot values poe requires on GraphQL calls.
        client.headers["poe-formkey"]  = extract_formkey(client.get('https://poe.com/login').text)
        client.headers["poe-tchannel"] = client.get('https://poe.com/api/settings').json()['tchannelData']['channel']

        # Solve the invisible reCAPTCHA guarding the signup mutation.
        token = reCaptchaV3('https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=vkGiR-M4noX1963Xi_DB0JeI&size=invisible&cb=hhps5wd06eue')

        payload = dumps(separators = (',', ':'), obj = {
            'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
            'variables': {
                'emailAddress': mail_address,
                'phoneNumber': None,
                'recaptchaToken': token
            },
            'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n  $emailAddress: String\n  $phoneNumber: String\n  $recaptchaToken: String\n) {\n  sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n    status\n    errorMessage\n  }\n}\n',
        })

        # poe signs each request as md5(payload + formkey + fixed salt).
        base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
        client.headers["poe-tag-id"] =  md5(base_string.encode()).hexdigest()

        response = client.post('https://poe.com/api/gql_POST', data=payload)
        if 'Bad Request' in response.text:
            if logging: print('bad request', response.json())
            # Fail loudly instead of calling quit(), which terminated the
            # whole host process from inside library code.
            raise RuntimeError('poe.com rejected the verification-code request')

        if logging: print('send_code' ,response.json())

        # Poll the disposable inbox until the 6-7 digit code arrives.
        while True:
            sleep(1)
            messages = mail.fetch_inbox()

            if len(messages["messages"]) > 0:
                email_content = mail.get_message_content(messages["messages"][0]["_id"])
                codes = findall(r';">(\d{6,7})</div>', email_content)
                if codes:  # guard: the first mail may not contain the code yet
                    mail_token = codes[0]

            if mail_token:
                break

        if logging: print('code', mail_token)

        payload = dumps(separators = (',', ':'), obj={
            "queryName": "SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation",
            "variables": {
                "verificationCode"  : mail_token,
                "emailAddress"      : mail_address,
                "phoneNumber"       : None
            },
            "query": "mutation SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation(\n  $verificationCode: String!\n  $emailAddress: String\n  $phoneNumber: String\n) {\n  signupWithVerificationCode(verificationCode: $verificationCode, emailAddress: $emailAddress, phoneNumber: $phoneNumber) {\n    status\n    errorMessage\n  }\n}\n"
        })

        base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
        client.headers["poe-tag-id"] =  md5(base_string.encode()).hexdigest()

        response = client.post('https://poe.com/api/gql_POST', data = payload)
        if logging: print('verify_code', response.json())

        token = parse.unquote(client.cookies.get_dict()['p-b'])

        # Persist the session token so Account.get() can reuse it later.
        with open(Path(__file__).resolve().parent / 'cookies.txt', 'a') as f:
            f.write(f'{token}\n')

        if enable_bot_creation:

            payload = dumps(separators = (',', ':'), obj={
                "queryName": "UserProfileConfigurePreviewModal_markMultiplayerNuxCompleted_Mutation",
                "variables": {},
                "query": "mutation UserProfileConfigurePreviewModal_markMultiplayerNuxCompleted_Mutation {\n  markMultiplayerNuxCompleted {\n    viewer {\n      hasCompletedMultiplayerNux\n      id\n    }\n  }\n}\n"
            })

            base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
            client.headers["poe-tag-id"] =  md5(base_string.encode()).hexdigest()

            resp = client.post("https://poe.com/api/gql_POST", data = payload)
            if logging: print(resp.json())

        return token

    def get():
        """Return a random saved `p-b` token from cookies.txt."""
        cookies = open(Path(__file__).resolve().parent / 'cookies.txt', 'r').read().splitlines()
        return choice(cookies)
+
class StreamingCompletion:
    def create(
        model : str = 'gpt-4',
        custom_model : str = None,
        prompt: str = 'hello world',
        token : str = ''):
        """Stream a poe.com reply, yielding one PoeResponse per incoming chunk.

        Parameters:
            model        : friendly model name, mapped to poe's internal slug.
            custom_model : poe bot handle used verbatim instead of the mapping
                           (the original mis-annotated this parameter as bool).
            prompt       : message sent to the bot.
            token        : poe.com `p-b` auth cookie.
        """
        models = {
            'sage'   : 'capybara',
            'gpt-4'  : 'beaver',
            'claude-v1.2'         : 'a2_2',
            'claude-instant-v1.0' : 'a2',
            'gpt-3.5-turbo'       : 'chinchilla'
        }

        # A custom bot handle bypasses the friendly-name mapping entirely.
        _model = models[model] if not custom_model else custom_model

        client = PoeClient(token)

        for chunk in client.send_message(_model, prompt):
            text = chunk["text_new"]

            yield PoeResponse({
                'id'     : chunk["messageId"],
                'object' : 'text_completion',
                'created': chunk['creationTime'],
                'model'  : _model,
                'choices': [{
                        'text'          : text,
                        'index'         : 0,
                        'logprobs'      : None,
                        'finish_reason' : 'stop'
                }],
                # NOTE(review): character counts stand in for token counts.
                'usage': {
                    'prompt_tokens'     : len(prompt),
                    'completion_tokens' : len(text),
                    'total_tokens'      : len(prompt) + len(text)
                }
            })
+
class Completion:
    def create(
        model : str = 'gpt-4',
        custom_model : str = None,
        prompt: str = 'hello world',
        token : str = ''):
        """Send `prompt` to a poe.com bot and return the completed reply.

        Parameters:
            model        : friendly model name, mapped to poe's internal slug.
            custom_model : poe bot handle used verbatim instead of the mapping.
            prompt       : message sent to the bot.
            token        : poe.com `p-b` auth cookie.

        Raises:
            RuntimeError: if poe streams back no chunks at all (the original
            code hit an UnboundLocalError on `chunk` in that case).
        """
        models = {
            'sage'   : 'capybara',
            'gpt-4'  : 'beaver',
            'claude-v1.2'         : 'a2_2',
            'claude-instant-v1.0' : 'a2',
            'gpt-3.5-turbo'       : 'chinchilla'
        }

        _model = models[model] if not custom_model else custom_model

        client = PoeClient(token)

        # Drain the stream; only the final chunk carries the full reply text.
        last_chunk = None
        for last_chunk in client.send_message(_model, prompt):
            pass

        if last_chunk is None:
            raise RuntimeError('poe.com returned no response chunks for this prompt')

        return PoeResponse({
                'id'     : last_chunk["messageId"],
                'object' : 'text_completion',
                'created': last_chunk['creationTime'],
                'model'  : _model,
                'choices': [{
                        'text'          : last_chunk["text"],
                        'index'         : 0,
                        'logprobs'      : None,
                        'finish_reason' : 'stop'
                }],
                # NOTE(review): character counts stand in for token counts.
                'usage': {
                    'prompt_tokens'     : len(prompt),
                    'completion_tokens' : len(last_chunk["text"]),
                    'total_tokens'      : len(prompt) + len(last_chunk["text"])
                }
            })

+ 532 - 0
llms/gpt4free/quora/api.py

@@ -0,0 +1,532 @@
+# This file was taken from the repository poe-api https://github.com/ading2210/poe-api and is unmodified
+# This file is licensed under the GNU GPL v3 and written by @ading2210
+
+# license:
+# ading2210/poe-api: a reverse engineered Python API wrapepr for Quora's Poe
+# Copyright (C) 2023 ading2210
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+import requests
+import re
+import json
+import random
+import logging
+import time
+import queue
+import threading
+import traceback
+import hashlib
+import string
+import random
+import requests.adapters
+import websocket
+from pathlib import Path
+from urllib.parse import urlparse
+
+
+parent_path = Path(__file__).resolve().parent
+queries_path = parent_path / "graphql"
+queries = {}
+
+logging.basicConfig()
+logger = logging.getLogger()
+
+user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Firefox/102.0"
+
+
+def load_queries():
+    for path in queries_path.iterdir():
+        if path.suffix != ".graphql":
+            continue
+        with open(path) as f:
+            queries[path.stem] = f.read()
+
+
+def generate_payload(query_name, variables):
+    return {
+        "query": queries[query_name],
+        "variables": variables
+    }
+
+
+def request_with_retries(method, *args, **kwargs):
+    attempts = kwargs.get("attempts") or 10
+    url = args[0]
+    for i in range(attempts):
+        r = method(*args, **kwargs)
+        if r.status_code == 200:
+            return r
+        logger.warn(
+            f"Server returned a status code of {r.status_code} while downloading {url}. Retrying ({i+1}/{attempts})...")
+
+    raise RuntimeError(f"Failed to download {url} too many times.")
+
+
+class Client:
+    gql_url = "https://poe.com/api/gql_POST"
+    gql_recv_url = "https://poe.com/api/receive_POST"
+    home_url = "https://poe.com"
+    settings_url = "https://poe.com/api/settings"
+
+    def __init__(self, token, proxy=None):
+        self.proxy = proxy
+        self.session = requests.Session()
+        self.adapter = requests.adapters.HTTPAdapter(
+            pool_connections=100, pool_maxsize=100)
+        self.session.mount("http://", self.adapter)
+        self.session.mount("https://", self.adapter)
+
+        if proxy:
+            self.session.proxies = {
+                "http": self.proxy,
+                "https": self.proxy
+            }
+            logger.info(f"Proxy enabled: {self.proxy}")
+
+        self.active_messages = {}
+        self.message_queues = {}
+
+        self.session.cookies.set("p-b", token, domain="poe.com")
+        self.headers = {
+            "User-Agent": user_agent,
+            "Referrer": "https://poe.com/",
+            "Origin": "https://poe.com",
+        }
+        self.session.headers.update(self.headers)
+
+        self.setup_connection()
+        self.connect_ws()
+
+    def setup_connection(self):
+        self.ws_domain = f"tch{random.randint(1, 1e6)}"
+        self.next_data = self.get_next_data(overwrite_vars=True)
+        self.channel = self.get_channel_data()
+        self.bots = self.get_bots(download_next_data=False)
+        self.bot_names = self.get_bot_names()
+
+        self.gql_headers = {
+            "poe-formkey": self.formkey,
+            "poe-tchannel": self.channel["channel"],
+        }
+        self.gql_headers = {**self.gql_headers, **self.headers}
+        self.subscribe()
+
+    def extract_formkey(self, html):
+        script_regex = r'<script>if\(.+\)throw new Error;(.+)</script>'
+        script_text = re.search(script_regex, html).group(1)
+        key_regex = r'var .="([0-9a-f]+)",'
+        key_text = re.search(key_regex, script_text).group(1)
+        cipher_regex = r'.\[(\d+)\]=.\[(\d+)\]'
+        cipher_pairs = re.findall(cipher_regex, script_text)
+
+        formkey_list = [""] * len(cipher_pairs)
+        for pair in cipher_pairs:
+            formkey_index, key_index = map(int, pair)
+            formkey_list[formkey_index] = key_text[key_index]
+        formkey = "".join(formkey_list)
+
+        return formkey
+
+    def get_next_data(self, overwrite_vars=False):
+        logger.info("Downloading next_data...")
+
+        r = request_with_retries(self.session.get, self.home_url)
+        json_regex = r'<script id="__NEXT_DATA__" type="application\/json">(.+?)</script>'
+        json_text = re.search(json_regex, r.text).group(1)
+        next_data = json.loads(json_text)
+
+        if overwrite_vars:
+            self.formkey = self.extract_formkey(r.text)
+            self.viewer = next_data["props"]["pageProps"]["payload"]["viewer"]
+            self.next_data = next_data
+
+        return next_data
+
+    def get_bot(self, display_name):
+        url = f'https://poe.com/_next/data/{self.next_data["buildId"]}/{display_name}.json'
+
+        r = request_with_retries(self.session.get, url)
+
+        chat_data = r.json()["pageProps"]["payload"]["chatOfBotDisplayName"]
+        return chat_data
+
+    def get_bots(self, download_next_data=True):
+        logger.info("Downloading all bots...")
+        if download_next_data:
+            next_data = self.get_next_data(overwrite_vars=True)
+        else:
+            next_data = self.next_data
+
+        if not "availableBots" in self.viewer:
+            raise RuntimeError("Invalid token or no bots are available.")
+        bot_list = self.viewer["availableBots"]
+
+        threads = []
+        bots = {}
+
+        def get_bot_thread(bot):
+            chat_data = self.get_bot(bot["displayName"])
+            bots[chat_data["defaultBotObject"]["nickname"]] = chat_data
+
+        for bot in bot_list:
+            thread = threading.Thread(
+                target=get_bot_thread, args=(bot,), daemon=True)
+            threads.append(thread)
+
+        for thread in threads:
+            thread.start()
+        for thread in threads:
+            thread.join()
+
+        self.bots = bots
+        self.bot_names = self.get_bot_names()
+        return bots
+
+    def get_bot_names(self):
+        bot_names = {}
+        for bot_nickname in self.bots:
+            bot_obj = self.bots[bot_nickname]["defaultBotObject"]
+            bot_names[bot_nickname] = bot_obj["displayName"]
+        return bot_names
+
+    def get_remaining_messages(self, chatbot):
+        chat_data = self.get_bot(self.bot_names[chatbot])
+        return chat_data["defaultBotObject"]["messageLimit"]["numMessagesRemaining"]
+
+    def get_channel_data(self, channel=None):
+        logger.info("Downloading channel data...")
+        r = request_with_retries(self.session.get, self.settings_url)
+        data = r.json()
+
+        return data["tchannelData"]
+
+    def get_websocket_url(self, channel=None):
+        if channel is None:
+            channel = self.channel
+        query = f'?min_seq={channel["minSeq"]}&channel={channel["channel"]}&hash={channel["channelHash"]}'
+        return f'wss://{self.ws_domain}.tch.{channel["baseHost"]}/up/{channel["boxName"]}/updates'+query
+
+    def send_query(self, query_name, variables):
+        for i in range(20):
+            json_data = generate_payload(query_name, variables)
+            payload = json.dumps(json_data, separators=(",", ":"))
+
+            base_string = payload + \
+                self.gql_headers["poe-formkey"] + "WpuLMiXEKKE98j56k"
+
+            headers = {
+                "content-type": "application/json",
+                "poe-tag-id": hashlib.md5(base_string.encode()).hexdigest()
+            }
+            headers = {**self.gql_headers, **headers}
+
+            r = request_with_retries(
+                self.session.post, self.gql_url, data=payload, headers=headers)
+
+            data = r.json()
+            if data["data"] == None:
+                logger.warn(
+                    f'{query_name} returned an error: {data["errors"][0]["message"]} | Retrying ({i+1}/20)')
+                time.sleep(2)
+                continue
+
+            return r.json()
+
+        raise RuntimeError(f'{query_name} failed too many times.')
+
+    def subscribe(self):
+        logger.info("Subscribing to mutations")
+        result = self.send_query("SubscriptionsMutation", {
+            "subscriptions": [
+                {
+                    "subscriptionName": "messageAdded",
+                    "query": queries["MessageAddedSubscription"]
+                },
+                {
+                    "subscriptionName": "viewerStateUpdated",
+                    "query": queries["ViewerStateUpdatedSubscription"]
+                }
+            ]
+        })
+
+    def ws_run_thread(self):
+        kwargs = {}
+        if self.proxy:
+            proxy_parsed = urlparse(self.proxy)
+            kwargs = {
+                "proxy_type": proxy_parsed.scheme,
+                "http_proxy_host": proxy_parsed.hostname,
+                "http_proxy_port": proxy_parsed.port
+            }
+
+        self.ws.run_forever(**kwargs)
+
+    def connect_ws(self):
+        self.ws_connected = False
+        self.ws = websocket.WebSocketApp(
+            self.get_websocket_url(),
+            header={"User-Agent": user_agent},
+            on_message=self.on_message,
+            on_open=self.on_ws_connect,
+            on_error=self.on_ws_error,
+            on_close=self.on_ws_close
+        )
+        t = threading.Thread(target=self.ws_run_thread, daemon=True)
+        t.start()
+        while not self.ws_connected:
+            time.sleep(0.01)
+
+    def disconnect_ws(self):
+        if self.ws:
+            self.ws.close()
+        self.ws_connected = False
+
+    def on_ws_connect(self, ws):
+        self.ws_connected = True
+
+    def on_ws_close(self, ws, close_status_code, close_message):
+        self.ws_connected = False
+        logger.warn(
+            f"Websocket closed with status {close_status_code}: {close_message}")
+
+    def on_ws_error(self, ws, error):
+        self.disconnect_ws()
+        self.connect_ws()
+
+    def on_message(self, ws, msg):
+        try:
+            data = json.loads(msg)
+
+            if not "messages" in data:
+                return
+
+            for message_str in data["messages"]:
+                message_data = json.loads(message_str)
+                if message_data["message_type"] != "subscriptionUpdate":
+                    continue
+                message = message_data["payload"]["data"]["messageAdded"]
+
+                copied_dict = self.active_messages.copy()
+                for key, value in copied_dict.items():
+                    # add the message to the appropriate queue
+                    if value == message["messageId"] and key in self.message_queues:
+                        self.message_queues[key].put(message)
+                        return
+
+                    # indicate that the response id is tied to the human message id
+                    elif key != "pending" and value == None and message["state"] != "complete":
+                        self.active_messages[key] = message["messageId"]
+                        self.message_queues[key].put(message)
+                        return
+
+        except Exception:
+            logger.error(traceback.format_exc())
+            self.disconnect_ws()
+            self.connect_ws()
+
+    def send_message(self, chatbot, message, with_chat_break=False, timeout=20):
+        # if there is another active message, wait until it has finished sending
+        while None in self.active_messages.values():
+            time.sleep(0.01)
+
+        # None indicates that a message is still in progress
+        self.active_messages["pending"] = None
+
+        logger.info(f"Sending message to {chatbot}: {message}")
+
+        # reconnect websocket
+        if not self.ws_connected:
+            self.disconnect_ws()
+            self.setup_connection()
+            self.connect_ws()
+
+        message_data = self.send_query("SendMessageMutation", {
+            "bot": chatbot,
+            "query": message,
+            "chatId": self.bots[chatbot]["chatId"],
+            "source": None,
+            "withChatBreak": with_chat_break
+        })
+        del self.active_messages["pending"]
+
+        if not message_data["data"]["messageEdgeCreate"]["message"]:
+            raise RuntimeError(f"Daily limit reached for {chatbot}.")
+        try:
+            human_message = message_data["data"]["messageEdgeCreate"]["message"]
+            human_message_id = human_message["node"]["messageId"]
+        except TypeError:
+            raise RuntimeError(
+                f"An unknown error occurred. Raw response data: {message_data}")
+
+        # indicate that the current message is waiting for a response
+        self.active_messages[human_message_id] = None
+        self.message_queues[human_message_id] = queue.Queue()
+
+        last_text = ""
+        message_id = None
+        while True:
+            try:
+                message = self.message_queues[human_message_id].get(
+                    timeout=timeout)
+            except queue.Empty:
+                del self.active_messages[human_message_id]
+                del self.message_queues[human_message_id]
+                raise RuntimeError("Response timed out.")
+
+            # only break when the message is marked as complete
+            if message["state"] == "complete":
+                if last_text and message["messageId"] == message_id:
+                    break
+                else:
+                    continue
+
+            # update info about response
+            message["text_new"] = message["text"][len(last_text):]
+            last_text = message["text"]
+            message_id = message["messageId"]
+
+            yield message
+
+        del self.active_messages[human_message_id]
+        del self.message_queues[human_message_id]
+
+    def send_chat_break(self, chatbot):
+        logger.info(f"Sending chat break to {chatbot}")
+        result = self.send_query("AddMessageBreakMutation", {
+            "chatId": self.bots[chatbot]["chatId"]
+        })
+        return result["data"]["messageBreakCreate"]["message"]
+
+    def get_message_history(self, chatbot, count=25, cursor=None):
+        logger.info(f"Downloading {count} messages from {chatbot}")
+
+        messages = []
+        if cursor == None:
+            chat_data = self.get_bot(self.bot_names[chatbot])
+            if not chat_data["messagesConnection"]["edges"]:
+                return []
+            messages = chat_data["messagesConnection"]["edges"][:count]
+            cursor = chat_data["messagesConnection"]["pageInfo"]["startCursor"]
+            count -= len(messages)
+
+        cursor = str(cursor)
+        if count > 50:
+            messages = self.get_message_history(
+                chatbot, count=50, cursor=cursor) + messages
+            while count > 0:
+                count -= 50
+                new_cursor = messages[0]["cursor"]
+                new_messages = self.get_message_history(
+                    chatbot, min(50, count), cursor=new_cursor)
+                messages = new_messages + messages
+            return messages
+        elif count <= 0:
+            return messages
+
+        result = self.send_query("ChatListPaginationQuery", {
+            "count": count,
+            "cursor": cursor,
+            "id": self.bots[chatbot]["id"]
+        })
+        query_messages = result["data"]["node"]["messagesConnection"]["edges"]
+        messages = query_messages + messages
+        return messages
+
+    def delete_message(self, message_ids):
+        logger.info(f"Deleting messages: {message_ids}")
+        if not type(message_ids) is list:
+            message_ids = [int(message_ids)]
+
+        result = self.send_query("DeleteMessageMutation", {
+            "messageIds": message_ids
+        })
+
+    def purge_conversation(self, chatbot, count=-1):
+        logger.info(f"Purging messages from {chatbot}")
+        last_messages = self.get_message_history(chatbot, count=50)[::-1]
+        while last_messages:
+            message_ids = []
+            for message in last_messages:
+                if count == 0:
+                    break
+                count -= 1
+                message_ids.append(message["node"]["messageId"])
+
+            self.delete_message(message_ids)
+
+            if count == 0:
+                return
+            last_messages = self.get_message_history(chatbot, count=50)[::-1]
+        logger.info(f"No more messages left to delete.")
+
+    def create_bot(self, handle, prompt="", base_model="chinchilla", description="",
+                   intro_message="", api_key=None, api_bot=False, api_url=None,
+                   prompt_public=True, pfp_url=None, linkification=False,
+                   markdown_rendering=True, suggested_replies=False, private=False):
+        result = self.send_query("PoeBotCreateMutation", {
+            "model": base_model,
+            "handle": handle,
+            "prompt": prompt,
+            "isPromptPublic": prompt_public,
+            "introduction": intro_message,
+            "description": description,
+            "profilePictureUrl": pfp_url,
+            "apiUrl": api_url,
+            "apiKey": api_key,
+            "isApiBot": api_bot,
+            "hasLinkification": linkification,
+            "hasMarkdownRendering": markdown_rendering,
+            "hasSuggestedReplies": suggested_replies,
+            "isPrivateBot": private
+        })
+
+        data = result["data"]["poeBotCreate"]
+        if data["status"] != "success":
+            raise RuntimeError(
+                f"Poe returned an error while trying to create a bot: {data['status']}")
+        self.get_bots()
+        return data
+
+    def edit_bot(self, bot_id, handle, prompt="", base_model="chinchilla", description="",
+                 intro_message="", api_key=None, api_url=None, private=False,
+                 prompt_public=True, pfp_url=None, linkification=False,
+                 markdown_rendering=True, suggested_replies=False):
+        
+        result = self.send_query("PoeBotEditMutation", {
+            "baseBot": base_model,
+            "botId": bot_id,
+            "handle": handle,
+            "prompt": prompt,
+            "isPromptPublic": prompt_public,
+            "introduction": intro_message,
+            "description": description,
+            "profilePictureUrl": pfp_url,
+            "apiUrl": api_url,
+            "apiKey": api_key,
+            "hasLinkification": linkification,
+            "hasMarkdownRendering": markdown_rendering,
+            "hasSuggestedReplies": suggested_replies,
+            "isPrivateBot": private
+        })
+
+        data = result["data"]["poeBotEdit"]
+        if data["status"] != "success":
+            raise RuntimeError(
+                f"Poe returned an error while trying to edit a bot: {data['status']}")
+        self.get_bots()
+        return data
+
+
+load_queries()

+ 21 - 0
llms/gpt4free/quora/cookies.txt

@@ -0,0 +1,21 @@
+SmPiNXZI9hBTuf3viz74PA==
+zw7RoKQfeEehiaelYMRWeA==
+NEttgJ_rRQdO05Tppx6hFw==
+3OnmC0r9njYdNWhWszdQJg==
+8hZKR7MxwUTEHvO45TEViw==
+Eea6BqK0AmosTKzoI3AAow==
+pUEbtxobN_QUSpLIR8RGww==
+9_dUWxKkHHhpQRSvCvBk2Q==
+UV45rvGwUwi2qV9QdIbMcw==
+cVIN0pK1Wx-F7zCdUxlYqA==
+UP2wQVds17VFHh6IfCQFrA==
+18eKr0ME2Tzifdfqat38Aw==
+FNgKEpc2r-XqWe0rHBfYpg==
+juCAh6kB0sUpXHvKik2woA==
+nBvuNYRLaE4xE4HuzBPiIQ==
+oyae3iClomSrk6RJywZ4iw==
+1Z27Ul8BTdNOhncT5H6wdg==
+wfUfJIlwQwUss8l-3kDt3w==
+f6Jw_Nr0PietpNCtOCXJTw==
+6Jc3yCs7XhDRNHa4ZML09g==
+3vy44sIy-ZlTMofFiFDttw==

+ 52 - 0
llms/gpt4free/quora/graphql/AddHumanMessageMutation.graphql

@@ -0,0 +1,52 @@
+mutation AddHumanMessageMutation(
+    $chatId: BigInt!
+    $bot: String!
+    $query: String!
+    $source: MessageSource
+    $withChatBreak: Boolean! = false
+) {
+    messageCreateWithStatus(
+        chatId: $chatId
+        bot: $bot
+        query: $query
+        source: $source
+        withChatBreak: $withChatBreak
+    ) {
+        message {
+            id
+            __typename
+            messageId
+            text
+            linkifiedText
+            authorNickname
+            state
+            vote
+            voteReason
+            creationTime
+            suggestedReplies
+            chat {
+                id
+                shouldShowDisclaimer
+            }
+        }
+        messageLimit{
+          canSend
+          numMessagesRemaining
+          resetTime
+          shouldShowReminder
+        }
+        chatBreak {
+            id
+            __typename
+            messageId
+            text
+            linkifiedText
+            authorNickname
+            state
+            vote
+            voteReason
+            creationTime
+            suggestedReplies
+        }
+    }
+}

+ 17 - 0
llms/gpt4free/quora/graphql/AddMessageBreakMutation.graphql

@@ -0,0 +1,17 @@
+mutation AddMessageBreakMutation($chatId: BigInt!) {
+    messageBreakCreate(chatId: $chatId) {
+        message {
+            id
+            __typename
+            messageId
+            text
+            linkifiedText
+            authorNickname
+            state
+            vote
+            voteReason
+            creationTime
+            suggestedReplies
+        }
+    }
+}

+ 7 - 0
llms/gpt4free/quora/graphql/AutoSubscriptionMutation.graphql

@@ -0,0 +1,7 @@
+mutation AutoSubscriptionMutation($subscriptions: [AutoSubscriptionQuery!]!) {
+    autoSubscribe(subscriptions: $subscriptions) {
+        viewer {
+            id
+        }
+    }
+}

+ 8 - 0
llms/gpt4free/quora/graphql/BioFragment.graphql

@@ -0,0 +1,8 @@
+fragment BioFragment on Viewer {
+    id
+    poeUser {
+        id
+        uid
+        bio
+    }
+}

+ 5 - 0
llms/gpt4free/quora/graphql/ChatAddedSubscription.graphql

@@ -0,0 +1,5 @@
+subscription ChatAddedSubscription {
+	chatAdded {
+		...ChatFragment
+	}
+}

+ 6 - 0
llms/gpt4free/quora/graphql/ChatFragment.graphql

@@ -0,0 +1,6 @@
+fragment ChatFragment on Chat {
+    id
+    chatId
+    defaultBotNickname
+    shouldShowDisclaimer
+}

+ 378 - 0
llms/gpt4free/quora/graphql/ChatListPaginationQuery.graphql

@@ -0,0 +1,378 @@
+query ChatListPaginationQuery(
+  $count: Int = 5
+  $cursor: String
+  $id: ID!
+) {
+  node(id: $id) {
+    __typename
+    ...ChatPageMain_chat_1G22uz
+    id
+  }
+}
+
+fragment BotImage_bot on Bot {
+  displayName
+  ...botHelpers_useDeletion_bot
+  ...BotImage_useProfileImage_bot
+}
+
+fragment BotImage_useProfileImage_bot on Bot {
+  image {
+    __typename
+    ... on LocalBotImage {
+      localName
+    }
+    ... on UrlBotImage {
+      url
+    }
+  }
+  ...botHelpers_useDeletion_bot
+}
+
+fragment ChatMessageDownvotedButton_message on Message {
+  ...MessageFeedbackReasonModal_message
+  ...MessageFeedbackOtherModal_message
+}
+
+fragment ChatMessageDropdownMenu_message on Message {
+  id
+  messageId
+  vote
+  text
+  author
+  ...chatHelpers_isBotMessage
+}
+
+fragment ChatMessageFeedbackButtons_message on Message {
+  id
+  messageId
+  vote
+  voteReason
+  ...ChatMessageDownvotedButton_message
+}
+
+fragment ChatMessageInputView_chat on Chat {
+  id
+  chatId
+  defaultBotObject {
+    nickname
+    messageLimit {
+      dailyBalance
+      shouldShowRemainingMessageCount
+    }
+    hasClearContext
+    isDown
+    ...botHelpers_useDeletion_bot
+    id
+  }
+  shouldShowDisclaimer
+  ...chatHelpers_useSendMessage_chat
+  ...chatHelpers_useSendChatBreak_chat
+}
+
+fragment ChatMessageInputView_edges on MessageEdge {
+  node {
+    ...chatHelpers_isChatBreak
+    ...chatHelpers_isHumanMessage
+    state
+    text
+    id
+  }
+}
+
+fragment ChatMessageOverflowButton_message on Message {
+  text
+  ...ChatMessageDropdownMenu_message
+  ...chatHelpers_isBotMessage
+}
+
+fragment ChatMessageSuggestedReplies_SuggestedReplyButton_chat on Chat {
+  ...chatHelpers_useSendMessage_chat
+}
+
+fragment ChatMessageSuggestedReplies_SuggestedReplyButton_message on Message {
+  messageId
+}
+
+fragment ChatMessageSuggestedReplies_chat on Chat {
+  ...ChatWelcomeView_chat
+  ...ChatMessageSuggestedReplies_SuggestedReplyButton_chat
+  defaultBotObject {
+    hasWelcomeTopics
+    id
+  }
+}
+
+fragment ChatMessageSuggestedReplies_message on Message {
+  suggestedReplies
+  ...ChatMessageSuggestedReplies_SuggestedReplyButton_message
+}
+
+fragment ChatMessage_chat on Chat {
+  defaultBotObject {
+    hasWelcomeTopics
+    hasSuggestedReplies
+    disclaimerText
+    messageLimit {
+      ...ChatPageRateLimitedBanner_messageLimit
+    }
+    ...ChatPageDisclaimer_bot
+    id
+  }
+  ...ChatMessageSuggestedReplies_chat
+  ...ChatWelcomeView_chat
+}
+
+fragment ChatMessage_message on Message {
+  id
+  messageId
+  text
+  author
+  linkifiedText
+  state
+  contentType
+  ...ChatMessageSuggestedReplies_message
+  ...ChatMessageFeedbackButtons_message
+  ...ChatMessageOverflowButton_message
+  ...chatHelpers_isHumanMessage
+  ...chatHelpers_isBotMessage
+  ...chatHelpers_isChatBreak
+  ...chatHelpers_useTimeoutLevel
+  ...MarkdownLinkInner_message
+  ...IdAnnotation_node
+}
+
+fragment ChatMessagesView_chat on Chat {
+  ...ChatMessage_chat
+  ...ChatWelcomeView_chat
+  ...IdAnnotation_node
+  defaultBotObject {
+    hasWelcomeTopics
+    messageLimit {
+      ...ChatPageRateLimitedBanner_messageLimit
+    }
+    id
+  }
+}
+
+fragment ChatMessagesView_edges on MessageEdge {
+  node {
+    id
+    messageId
+    creationTime
+    ...ChatMessage_message
+    ...chatHelpers_isBotMessage
+    ...chatHelpers_isHumanMessage
+    ...chatHelpers_isChatBreak
+  }
+}
+
+fragment ChatPageDeleteFooter_chat on Chat {
+  ...MessageDeleteConfirmationModal_chat
+}
+
+fragment ChatPageDisclaimer_bot on Bot {
+  disclaimerText
+}
+
+fragment ChatPageMainFooter_chat on Chat {
+  defaultBotObject {
+    ...ChatPageMainFooter_useAccessMessage_bot
+    id
+  }
+  ...ChatMessageInputView_chat
+  ...ChatPageShareFooter_chat
+  ...ChatPageDeleteFooter_chat
+}
+
+fragment ChatPageMainFooter_edges on MessageEdge {
+  ...ChatMessageInputView_edges
+}
+
+fragment ChatPageMainFooter_useAccessMessage_bot on Bot {
+  ...botHelpers_useDeletion_bot
+  ...botHelpers_useViewerCanAccessPrivateBot
+}
+
+fragment ChatPageMain_chat_1G22uz on Chat {
+  id
+  chatId
+  ...ChatPageShareFooter_chat
+  ...ChatPageDeleteFooter_chat
+  ...ChatMessagesView_chat
+  ...MarkdownLinkInner_chat
+  ...chatHelpers_useUpdateStaleChat_chat
+  ...ChatSubscriptionPaywallContextWrapper_chat
+  ...ChatPageMainFooter_chat
+  messagesConnection(last: $count, before: $cursor) {
+    edges {
+      ...ChatMessagesView_edges
+      ...ChatPageMainFooter_edges
+      ...MarkdownLinkInner_edges
+      node {
+        ...chatHelpers_useUpdateStaleChat_message
+        id
+        __typename
+      }
+      cursor
+      id
+    }
+    pageInfo {
+      hasPreviousPage
+      startCursor
+    }
+    id
+  }
+}
+
+fragment ChatPageRateLimitedBanner_messageLimit on MessageLimit {
+  numMessagesRemaining
+}
+
+fragment ChatPageShareFooter_chat on Chat {
+  chatId
+}
+
+fragment ChatSubscriptionPaywallContextWrapper_chat on Chat {
+  defaultBotObject {
+    messageLimit {
+      numMessagesRemaining
+      shouldShowRemainingMessageCount
+    }
+    ...SubscriptionPaywallModal_bot
+    id
+  }
+}
+
+fragment ChatWelcomeView_ChatWelcomeButton_chat on Chat {
+  ...chatHelpers_useSendMessage_chat
+}
+
+fragment ChatWelcomeView_chat on Chat {
+  ...ChatWelcomeView_ChatWelcomeButton_chat
+  defaultBotObject {
+    displayName
+    id
+  }
+}
+
+fragment IdAnnotation_node on Node {
+  __isNode: __typename
+  id
+}
+
+fragment MarkdownLinkInner_chat on Chat {
+  id
+  chatId
+  defaultBotObject {
+    nickname
+    id
+  }
+  ...chatHelpers_useSendMessage_chat
+}
+
+fragment MarkdownLinkInner_edges on MessageEdge {
+  node {
+    state
+    id
+  }
+}
+
+fragment MarkdownLinkInner_message on Message {
+  messageId
+}
+
+fragment MessageDeleteConfirmationModal_chat on Chat {
+  id
+}
+
+fragment MessageFeedbackOtherModal_message on Message {
+  id
+  messageId
+}
+
+fragment MessageFeedbackReasonModal_message on Message {
+  id
+  messageId
+}
+
+fragment SubscriptionPaywallModal_bot on Bot {
+  displayName
+  messageLimit {
+    dailyLimit
+    numMessagesRemaining
+    shouldShowRemainingMessageCount
+    resetTime
+  }
+  ...BotImage_bot
+}
+
+fragment botHelpers_useDeletion_bot on Bot {
+  deletionState
+}
+
+fragment botHelpers_useViewerCanAccessPrivateBot on Bot {
+  isPrivateBot
+  viewerIsCreator
+}
+
+fragment chatHelpers_isBotMessage on Message {
+  ...chatHelpers_isHumanMessage
+  ...chatHelpers_isChatBreak
+}
+
+fragment chatHelpers_isChatBreak on Message {
+  author
+}
+
+fragment chatHelpers_isHumanMessage on Message {
+  author
+}
+
+fragment chatHelpers_useSendChatBreak_chat on Chat {
+  id
+  chatId
+  defaultBotObject {
+    nickname
+    introduction
+    model
+    id
+  }
+  shouldShowDisclaimer
+}
+
+fragment chatHelpers_useSendMessage_chat on Chat {
+  id
+  chatId
+  defaultBotObject {
+    id
+    nickname
+  }
+  shouldShowDisclaimer
+}
+
+fragment chatHelpers_useTimeoutLevel on Message {
+  id
+  state
+  text
+  messageId
+  chat {
+    chatId
+    defaultBotNickname
+    id
+  }
+}
+
+fragment chatHelpers_useUpdateStaleChat_chat on Chat {
+  chatId
+  defaultBotObject {
+    contextClearWindowSecs
+    id
+  }
+  ...chatHelpers_useSendChatBreak_chat
+}
+
+fragment chatHelpers_useUpdateStaleChat_message on Message {
+  creationTime
+  ...chatHelpers_isChatBreak
+}

+ 26 - 0
llms/gpt4free/quora/graphql/ChatPaginationQuery.graphql

@@ -0,0 +1,26 @@
+query ChatPaginationQuery($bot: String!, $before: String, $last: Int! = 10) {
+    chatOfBot(bot: $bot) {
+        id
+        __typename
+        messagesConnection(before: $before, last: $last) {
+            pageInfo {
+                hasPreviousPage
+            }
+            edges {
+                node {
+                    id
+                    __typename
+                    messageId
+                    text
+                    linkifiedText
+                    authorNickname
+                    state
+                    vote
+                    voteReason
+                    creationTime
+                    suggestedReplies
+                }
+            }
+        }
+    }
+}

+ 8 - 0
llms/gpt4free/quora/graphql/ChatViewQuery.graphql

@@ -0,0 +1,8 @@
+query ChatViewQuery($bot: String!) {
+    chatOfBot(bot: $bot) {
+        id
+        chatId
+        defaultBotNickname
+        shouldShowDisclaimer
+    }
+}

+ 7 - 0
llms/gpt4free/quora/graphql/DeleteHumanMessagesMutation.graphql

@@ -0,0 +1,7 @@
+mutation DeleteHumanMessagesMutation($messageIds: [BigInt!]!) {
+    messagesDelete(messageIds: $messageIds) {
+        viewer {
+            id
+        }
+    }
+}

+ 7 - 0
llms/gpt4free/quora/graphql/DeleteMessageMutation.graphql

@@ -0,0 +1,7 @@
+mutation deleteMessageMutation(
+  $messageIds: [BigInt!]!
+) {
+  messagesDelete(messageIds: $messageIds) {
+    edgeIds
+  }
+}

+ 8 - 0
llms/gpt4free/quora/graphql/HandleFragment.graphql

@@ -0,0 +1,8 @@
+fragment HandleFragment on Viewer {
+    id
+    poeUser {
+        id
+        uid
+        handle
+    }
+}

+ 13 - 0
llms/gpt4free/quora/graphql/LoginWithVerificationCodeMutation.graphql

@@ -0,0 +1,13 @@
+mutation LoginWithVerificationCodeMutation(
+    $verificationCode: String!
+    $emailAddress: String
+    $phoneNumber: String
+) {
+    loginWithVerificationCode(
+        verificationCode: $verificationCode
+        emailAddress: $emailAddress
+        phoneNumber: $phoneNumber
+    ) {
+        status
+    }
+}

+ 100 - 0
llms/gpt4free/quora/graphql/MessageAddedSubscription.graphql

@@ -0,0 +1,100 @@
+subscription messageAdded (
+  $chatId: BigInt!
+) {
+  messageAdded(chatId: $chatId) {
+    id
+    messageId
+    creationTime
+    state
+    ...ChatMessage_message
+    ...chatHelpers_isBotMessage
+  }
+}
+
+fragment ChatMessageDownvotedButton_message on Message {
+  ...MessageFeedbackReasonModal_message
+  ...MessageFeedbackOtherModal_message
+}
+
+fragment ChatMessageDropdownMenu_message on Message {
+  id
+  messageId
+  vote
+  text
+  linkifiedText
+  ...chatHelpers_isBotMessage
+}
+
+fragment ChatMessageFeedbackButtons_message on Message {
+  id
+  messageId
+  vote
+  voteReason
+  ...ChatMessageDownvotedButton_message
+}
+
+fragment ChatMessageOverflowButton_message on Message {
+  text
+  ...ChatMessageDropdownMenu_message
+  ...chatHelpers_isBotMessage
+}
+
+fragment ChatMessageSuggestedReplies_SuggestedReplyButton_message on Message {
+  messageId
+}
+
+fragment ChatMessageSuggestedReplies_message on Message {
+  suggestedReplies
+  ...ChatMessageSuggestedReplies_SuggestedReplyButton_message
+}
+
+fragment ChatMessage_message on Message {
+  id
+  messageId
+  text
+  author
+  linkifiedText
+  state
+  ...ChatMessageSuggestedReplies_message
+  ...ChatMessageFeedbackButtons_message
+  ...ChatMessageOverflowButton_message
+  ...chatHelpers_isHumanMessage
+  ...chatHelpers_isBotMessage
+  ...chatHelpers_isChatBreak
+  ...chatHelpers_useTimeoutLevel
+  ...MarkdownLinkInner_message
+}
+
+fragment MarkdownLinkInner_message on Message {
+  messageId
+}
+
+fragment MessageFeedbackOtherModal_message on Message {
+  id
+  messageId
+}
+
+fragment MessageFeedbackReasonModal_message on Message {
+  id
+  messageId
+}
+
+fragment chatHelpers_isBotMessage on Message {
+  ...chatHelpers_isHumanMessage
+  ...chatHelpers_isChatBreak
+}
+
+fragment chatHelpers_isChatBreak on Message {
+  author
+}
+
+fragment chatHelpers_isHumanMessage on Message {
+  author
+}
+
+fragment chatHelpers_useTimeoutLevel on Message {
+  id
+  state
+  text
+  messageId
+}

+ 6 - 0
llms/gpt4free/quora/graphql/MessageDeletedSubscription.graphql

@@ -0,0 +1,6 @@
+subscription MessageDeletedSubscription($chatId: BigInt!) {
+    messageDeleted(chatId: $chatId) {
+        id
+        messageId
+    }
+}

+ 13 - 0
llms/gpt4free/quora/graphql/MessageFragment.graphql

@@ -0,0 +1,13 @@
+fragment MessageFragment on Message {
+    id
+    __typename
+    messageId
+    text
+    linkifiedText
+    authorNickname
+    state
+    vote
+    voteReason
+    creationTime
+    suggestedReplies
+}

+ 7 - 0
llms/gpt4free/quora/graphql/MessageRemoveVoteMutation.graphql

@@ -0,0 +1,7 @@
+mutation MessageRemoveVoteMutation($messageId: BigInt!) {
+	messageRemoveVote(messageId: $messageId) {
+		message {
+			...MessageFragment
+		}
+	}
+}

+ 7 - 0
llms/gpt4free/quora/graphql/MessageSetVoteMutation.graphql

@@ -0,0 +1,7 @@
+mutation MessageSetVoteMutation($messageId: BigInt!, $voteType: VoteType!, $reason: String) {
+	messageSetVote(messageId: $messageId, voteType: $voteType, reason: $reason) {
+		message {
+			...MessageFragment
+		}
+	}
+}

+ 73 - 0
llms/gpt4free/quora/graphql/PoeBotCreateMutation.graphql

@@ -0,0 +1,73 @@
+mutation CreateBotMain_poeBotCreate_Mutation(
+  $model: String!
+  $handle: String!
+  $prompt: String!
+  $isPromptPublic: Boolean!
+  $introduction: String!
+  $description: String!
+  $profilePictureUrl: String
+  $apiUrl: String
+  $apiKey: String
+  $isApiBot: Boolean
+  $hasLinkification: Boolean
+  $hasMarkdownRendering: Boolean
+  $hasSuggestedReplies: Boolean
+  $isPrivateBot: Boolean
+) {
+  poeBotCreate(model: $model, handle: $handle, promptPlaintext: $prompt, isPromptPublic: $isPromptPublic, introduction: $introduction, description: $description, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, isApiBot: $isApiBot, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {
+    status
+    bot {
+      id
+      ...BotHeader_bot
+    }
+  }
+}
+
+fragment BotHeader_bot on Bot {
+  displayName
+  messageLimit {
+    dailyLimit
+  }
+  ...BotImage_bot
+  ...BotLink_bot
+  ...IdAnnotation_node
+  ...botHelpers_useViewerCanAccessPrivateBot
+  ...botHelpers_useDeletion_bot
+}
+
+fragment BotImage_bot on Bot {
+  displayName
+  ...botHelpers_useDeletion_bot
+  ...BotImage_useProfileImage_bot
+}
+
+fragment BotImage_useProfileImage_bot on Bot {
+  image {
+    __typename
+    ... on LocalBotImage {
+      localName
+    }
+    ... on UrlBotImage {
+      url
+    }
+  }
+  ...botHelpers_useDeletion_bot
+}
+
+fragment BotLink_bot on Bot {
+  displayName
+}
+
+fragment IdAnnotation_node on Node {
+  __isNode: __typename
+  id
+}
+
+fragment botHelpers_useDeletion_bot on Bot {
+  deletionState
+}
+
+fragment botHelpers_useViewerCanAccessPrivateBot on Bot {
+  isPrivateBot
+  viewerIsCreator
+}

+ 24 - 0
llms/gpt4free/quora/graphql/PoeBotEditMutation.graphql

@@ -0,0 +1,24 @@
+mutation EditBotMain_poeBotEdit_Mutation(
+  $botId: BigInt!
+  $handle: String!
+  $description: String!
+  $introduction: String!
+  $isPromptPublic: Boolean!
+  $baseBot: String!
+  $profilePictureUrl: String
+  $prompt: String!
+  $apiUrl: String
+  $apiKey: String
+  $hasLinkification: Boolean
+  $hasMarkdownRendering: Boolean
+  $hasSuggestedReplies: Boolean
+  $isPrivateBot: Boolean
+) {
+  poeBotEdit(botId: $botId, handle: $handle, description: $description, introduction: $introduction, isPromptPublic: $isPromptPublic, model: $baseBot, promptPlaintext: $prompt, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {
+    status
+    bot {
+      handle
+      id
+    }
+  }
+}

+ 40 - 0
llms/gpt4free/quora/graphql/SendMessageMutation.graphql

@@ -0,0 +1,40 @@
+mutation chatHelpers_sendMessageMutation_Mutation(
+  $chatId: BigInt!
+  $bot: String!
+  $query: String!
+  $source: MessageSource
+  $withChatBreak: Boolean!
+) {
+  messageEdgeCreate(chatId: $chatId, bot: $bot, query: $query, source: $source, withChatBreak: $withChatBreak) {
+    chatBreak {
+      cursor
+      node {
+        id
+        messageId
+        text
+        author
+        suggestedReplies
+        creationTime
+        state
+      }
+      id
+    }
+    message {
+      cursor
+      node {
+        id
+        messageId
+        text
+        author
+        suggestedReplies
+        creationTime
+        state
+        chat {
+          shouldShowDisclaimer
+          id
+        }
+      }
+      id
+    }
+  }
+}

+ 12 - 0
llms/gpt4free/quora/graphql/SendVerificationCodeForLoginMutation.graphql

@@ -0,0 +1,12 @@
+mutation SendVerificationCodeForLoginMutation(
+    $emailAddress: String
+    $phoneNumber: String
+) {
+    sendVerificationCode(
+        verificationReason: login
+        emailAddress: $emailAddress
+        phoneNumber: $phoneNumber
+    ) {
+        status
+    }
+}

+ 9 - 0
llms/gpt4free/quora/graphql/ShareMessagesMutation.graphql

@@ -0,0 +1,9 @@
+mutation ShareMessagesMutation(
+    $chatId: BigInt!
+    $messageIds: [BigInt!]!
+    $comment: String
+) {
+    messagesShare(chatId: $chatId, messageIds: $messageIds, comment: $comment) {
+        shareCode
+    }
+}

+ 13 - 0
llms/gpt4free/quora/graphql/SignupWithVerificationCodeMutation.graphql

@@ -0,0 +1,13 @@
+mutation SignupWithVerificationCodeMutation(
+    $verificationCode: String!
+    $emailAddress: String
+    $phoneNumber: String
+) {
+    signupWithVerificationCode(
+        verificationCode: $verificationCode
+        emailAddress: $emailAddress
+        phoneNumber: $phoneNumber
+    ) {
+        status
+    }
+}

+ 7 - 0
llms/gpt4free/quora/graphql/StaleChatUpdateMutation.graphql

@@ -0,0 +1,7 @@
+mutation StaleChatUpdateMutation($chatId: BigInt!) {
+    staleChatUpdate(chatId: $chatId) {
+        message {
+            ...MessageFragment
+        }
+    }
+}

+ 9 - 0
llms/gpt4free/quora/graphql/SubscriptionsMutation.graphql

@@ -0,0 +1,9 @@
+mutation subscriptionsMutation(
+  $subscriptions: [AutoSubscriptionQuery!]!
+) {
+  autoSubscribe(subscriptions: $subscriptions) {
+    viewer {
+      id
+    }
+  }
+}

+ 3 - 0
llms/gpt4free/quora/graphql/SummarizePlainPostQuery.graphql

@@ -0,0 +1,3 @@
+query SummarizePlainPostQuery($comment: String!) {
+    summarizePlainPost(comment: $comment)
+}

+ 3 - 0
llms/gpt4free/quora/graphql/SummarizeQuotePostQuery.graphql

@@ -0,0 +1,3 @@
+query SummarizeQuotePostQuery($comment: String, $quotedPostId: BigInt!) {
+    summarizeQuotePost(comment: $comment, quotedPostId: $quotedPostId)
+}

+ 3 - 0
llms/gpt4free/quora/graphql/SummarizeSharePostQuery.graphql

@@ -0,0 +1,3 @@
+query SummarizeSharePostQuery($comment: String!, $chatId: BigInt!, $messageIds: [BigInt!]!) {
+    summarizeSharePost(comment: $comment, chatId: $chatId, messageIds: $messageIds)
+}

+ 14 - 0
llms/gpt4free/quora/graphql/UserSnippetFragment.graphql

@@ -0,0 +1,14 @@
+fragment UserSnippetFragment on PoeUser {
+    id
+    uid
+    bio
+    handle
+    fullName
+    viewerIsFollowing
+    isPoeOnlyUser
+    profilePhotoURLTiny: profilePhotoUrl(size: tiny)
+    profilePhotoURLSmall: profilePhotoUrl(size: small)
+    profilePhotoURLMedium: profilePhotoUrl(size: medium)
+    profilePhotoURLLarge: profilePhotoUrl(size: large)
+    isFollowable
+}

Algunos archivos no se mostraron porque demasiados archivos cambiaron en este cambio

粤ICP备19079148号