build(boards): 所有板卡执行 npm run build:prod
This commit is contained in:
@@ -605,7 +605,7 @@
|
||||
"json"
|
||||
],
|
||||
"__file__": true,
|
||||
"__size__": 3362,
|
||||
"__size__": 3412,
|
||||
"__name__": "ollama.py"
|
||||
},
|
||||
"onenet": {
|
||||
@@ -617,6 +617,17 @@
|
||||
"__size__": 1102,
|
||||
"__name__": "onenet.py"
|
||||
},
|
||||
"openai": {
|
||||
"__require__": [
|
||||
"urequests",
|
||||
"time",
|
||||
"json",
|
||||
"ollama"
|
||||
],
|
||||
"__file__": true,
|
||||
"__size__": 1772,
|
||||
"__name__": "openai.py"
|
||||
},
|
||||
"pe_g1": {
|
||||
"__require__": [
|
||||
"time",
|
||||
|
||||
@@ -4,7 +4,7 @@ import json
|
||||
|
||||
|
||||
class Ollama():
|
||||
def __init__(self, url="", model="", max_history_num=0):
|
||||
def __init__(self, url="", model="", max_history_num=0, max_tokens=1024):
|
||||
self._heads = {
|
||||
"Accept": "text/event-stream",
|
||||
# "Cache-Control": "no-cache",
|
||||
@@ -19,7 +19,8 @@ class Ollama():
|
||||
self._data = {
|
||||
"stream": True,
|
||||
"model": model,
|
||||
"messages": self._messages
|
||||
"messages": self._messages,
|
||||
"max_tokens": max_tokens
|
||||
}
|
||||
|
||||
def set_timeout(self, timeout):
|
||||
@@ -60,7 +61,7 @@ class Ollama():
|
||||
self._url, headers=self._heads, data=data)
|
||||
if response.status_code == 200:
|
||||
break
|
||||
time.slee(1)
|
||||
time.sleep(1)
|
||||
|
||||
output = ""
|
||||
|
||||
@@ -101,7 +102,7 @@ class Ollama():
|
||||
self.add_history("assistant", content)
|
||||
messages_len = len(self._messages)
|
||||
history_num = 2 * self._max_history_num
|
||||
while history_num < len(self._messages):
|
||||
while history_num < messages_len:
|
||||
del self._messages[0]
|
||||
else:
|
||||
self.clear_user_history()
|
||||
|
||||
53
boards/default/micropython/build/lib/openai.py
Normal file
53
boards/default/micropython/build/lib/openai.py
Normal file
@@ -0,0 +1,53 @@
|
||||
import urequests
|
||||
import time
|
||||
import json
|
||||
import ollama
|
||||
|
||||
|
||||
class OpenAI(ollama.Ollama):
    """OpenAI-compatible chat client built on top of the Ollama base class.

    Reuses the base class's header/message-history handling and posts to the
    standard ``/chat/completions`` endpoint with a Bearer token.
    MicroPython target: uses ``urequests`` for HTTP.
    """

    def __init__(self, url="", api_key="", model="", max_history_num=0, max_tokens=1024):
        # Base class initializes self._heads, self._data, self._messages,
        # self._url and retry settings (see ollama.Ollama).
        super().__init__(url, model, max_history_num)
        self._heads["Authorization"] = "Bearer {}".format(api_key)
        self._data["max_tokens"] = max_tokens
        self._chat_url = "{}/chat/completions".format(self._url)

    def _post(self, content_callback=None):
        """POST the accumulated messages and return the assistant reply text.

        :param content_callback: optional callable invoked with each content
            delta; when given, the response is consumed as a server-sent-event
            stream, otherwise the full JSON body is parsed in one shot.
        :return: the concatenated reply text, or the raw error body on a
            non-200 status ("" if no request could be made).
        Retries up to ``self._max_retries`` times, sleeping 1 s between tries.
        """
        response = None
        data = json.dumps(self._data).encode('utf-8')
        for _ in range(0, self._max_retries):
            response = urequests.post(
                self._chat_url, headers=self._heads, data=data)
            if response.status_code == 200:
                break
            time.sleep(1)

        output = ""

        # Guard: with _max_retries == 0 no request was ever made; the
        # original code would dereference None here.
        if response is None:
            return output

        if response.status_code != 200:
            output = response.text
            response.close()  # don't leak the socket on the error path
            if content_callback:
                content_callback(output)
            return output

        if not content_callback:
            output = json.loads(response.text)[
                "choices"][0]["message"]["content"]
            response.close()
            return output

        try:
            while True:
                line = response.raw.readline()
                # An empty read means the stream ended (possibly without a
                # [DONE] marker) — bail out instead of spinning forever.
                if not line:
                    break
                if line[:5] != b"data:":
                    continue
                if line[-7:-1] == b"[DONE]":
                    break
                line = line[6:-1]
                line = line.decode('utf-8').strip()
                data = json.loads(line)
                # Some SSE chunks (role prelude, finish chunk) carry no
                # "content" key in the delta — skip them instead of raising
                # KeyError mid-stream.
                content = data["choices"][0]["delta"].get("content", "")
                if not content:
                    continue
                content_callback(content)
                output += content
        finally:
            # Always release the connection, even if a chunk fails to parse.
            response.close()
        return output
|
||||
Reference in New Issue
Block a user