mochiii
it's over. they don't even offer the preview on their own site now.
Thanks hackerman and moe
Hosts on http://127.0.0.1:8080/
You're in. You now have full 100K-context Claude, Mixtral, and GPT-4!
Update: Working models are gpt-4-0314, gpt-4-0613, gpt-4-1106-preview, gpt-4-vision-preview, claude-instant-1.2, claude-2.1
to well-meaning hosters: please do not try to deobfuscate the code, it only makes it easier for moemate to patch this stuff. try hosting it as is. on request i can add a dockerfile.
data updated (again)
make sure to grab the new contents of DATA, the script has not changed
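if you want to check that you copied DATA correctly, you can decode it the same way main.py does at startup (a minimal sketch, assuming DATA is already set in your environment):

import os, json, zlib, base64

# decode DATA exactly the way main.py's unpack line does
fields = json.loads(zlib.decompress(base64.b64decode(os.getenv("DATA").encode())).decode())
print(len(fields))  # should be 12: five routes, two model lists, three upstream urls, two header dicts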
how to use this in ST (for dummies)
if you're not a windows user you probably know what you're doing
- install Python, make sure "add to system PATH" is enabled; log in and out
- in a new folder, paste the main.py code below into main.py
- right click on a blank space in the explorer window -> "open PowerShell window here"
- type the following command:
  $env:DATA = '<insert data from below here>'
- type the following command exactly:
  pip install --user requests flask
- type the following command exactly:
  python main.py
  and leave the window open
- in ST, put http://127.0.0.1:8080/company/v1 in the proxy url box, replacing company with the appropriate one
- in ST, make sure the proxy password field is not empty
- enjoy (a quick sanity check is sketched below)
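quick sanity check before pointing ST at it (a minimal sketch; /openai/v1 is just the example prefix from the pinger section below, substitute whichever company route your DATA exposes):

import requests

# the route prefix depends on your DATA; openai is only an example
r = requests.get("http://127.0.0.1:8080/openai/v1/models")
print(r.status_code, r.json())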
repl.it hosting vvv
- just clone the repl from my account
if i'm gone/banned/spitefagged, follow this guide to host your own:
- go to https://replit.com/
- create repl
- select 'python'
- paste the main.py code below into main.py
- bottom right 'tools' > scroll > click 'secrets'
- left sidebar > edit as JSON > paste the secrets JSON -> save
- big green run button
- 'webview' panel will appear > click 'new tab'
- use a pinger like https://app.checklyhq.com/ to keep it up. example pings (a keep-alive sketch follows this list):
  - GET https://your-url.dev/openai/v1/models
  - POST https://your-url.dev/openai/v1/chat/completions
  - every 30 seconds or something
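if you'd rather not use a third-party pinger, a minimal keep-alive sketch (your-url.dev is a placeholder for your repl's url; add the POST ping too if plain GETs don't keep it awake):

import time
import requests

URL = "https://your-url.dev"  # placeholder, replace with your repl's url

while True:
    try:
        requests.get(f"{URL}/openai/v1/models", timeout=30)
    except requests.RequestException:
        pass  # the point is the ping, not the result
    time.sleep(30)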
secrets JSON
main.py
from flask import Flask, request, Response
import requests
import time
import json
import os
import zlib
import base64

st = int(time.time())  # start time, for the uptime counter on the status page
p1 = 0  # per-route request counters
p2 = 0
p3 = 0
app = Flask(__name__)
# routes, model lists, upstream urls and headers are all unpacked from the DATA secret
r1, r2, r3, r4, r5, i, j, a1, a2, a3, h1, h2 = json.loads(zlib.decompress(base64.b64decode(os.getenv("DATA").encode())).decode())
@app.route(r1, methods=['POST'])
def a1_f():
    global p1
    req_stream = requests.post(
        a1,
        json=request.get_json(),
        headers=h1,
        stream=True
    )
    if req_stream.ok:
        p1 += 1

    def g():
        # relay the upstream body to the client unchanged
        for chunk in req_stream.iter_content(chunk_size=4096):
            yield chunk

    return Response(g(), content_type=req_stream.headers['Content-Type'], status=req_stream.status_code)
# build an openai-style model list from a list of model ids
mm = lambda x: {"object": "list", "data": [{"id": m, "object": "model", "created": 0, "owned_by": "oai"} for m in x]}
m = mm(i)


@app.route(r2, methods=['GET'])
def a2_f():
    return m
@app.route(r3, methods=['POST'])
def a3_f():
    global p2
    js = request.get_json()
    sr = js.get("stream", False)
    req_stream = requests.post(
        a2,
        json=js,
        headers=h1,
        stream=True
    )
    if req_stream.ok:
        p2 += 1

    def g():
        if sr:
            # each upstream event carries the whole completion so far;
            # slice off the part already sent so the client receives deltas
            last_pos = 0
            for line in req_stream.iter_lines():
                if line.startswith(b'data: ') and not line.startswith(b'data: [DONE]'):
                    data = json.loads(line.removeprefix(b'data: ').decode('utf-8'))
                    pos, last_pos = last_pos, len(data['completion'])
                    data['completion'] = data['completion'][pos:]
                    yield f'data: {json.dumps(data, indent=None, separators=(",", ":"))}\r\n\r\n'
        else:
            for chunk in req_stream.iter_content(chunk_size=4096):
                yield chunk

    return Response(g(), content_type=req_stream.headers['Content-Type'], status=req_stream.status_code)
n = mm(j)


@app.route(r4, methods=["GET"])
def a4_f():
    return n


@app.route(r5, methods=['POST'])
def a5_f():
    global p3
    request_json = request.get_json()
    if request_json.get("stream", False) or request_json.get("echo", False):
        return Response("cannot use streaming or echo", status=400)
    seed = None if request_json.get("seed") == -1 else request_json.get("seed")
    # build the upstream payload from the ooba-style sampler settings
    real_json = {
        "inputs": request_json["prompt"],
        "parameters": {
            "best_of": 1,
            "decoder_input_details": True,
            "details": False,
            "do_sample": True,
            "max_new_tokens": request_json["max_new_tokens"],
            "repetition_penalty": request_json["repetition_penalty"],
            "return_full_text": False,
            "seed": seed,
            "stop": request_json["stopping_strings"],
            "stream": False,
            "temperature": request_json["temperature"],
            "top_k": request_json["top_k"],
            "top_p": request_json["top_p"],
            "truncate": None,
            "typical_p": request_json["typical_p"],
            "watermark": False,
        },
    }
    req_stream = requests.post(
        a3,
        json=real_json,
        headers=(h1 | h2),
    )
    if req_stream.ok:
        p3 += 1
    else:
        return Response(req_stream.text, status=500)
    data = req_stream.json()
    finish_reason = "length" if data["output"]["usage"]["completion_tokens"] >= request_json["max_new_tokens"] else "stop"
    # wrap the upstream result in an openai text_completion response
    response = {
        "id": "cmpl-xyz",
        "object": "text_completion",
        "created": int(time.time()),
        "model": j[0],
        "choices": [{
            "index": 0,
            "finish_reason": finish_reason,
            "text": data["output"]["choices"][0]["text"],
            "logprobs": None,
        }],
        "usage": data["output"]["usage"],
    }
    return response
@app.route('/', methods=['GET'])
def a6_f():
    u = int(time.time()) - st
    s = f"""
<!doctype html>
<html lang="en">
<head><meta charset="utf-8"><meta name="robots" content="noindex"><title>mochi</title></head>
<body style="font-family: sans-serif; background-color: #f0f0f0; padding: 1em;">
<h2>mochi</h2>
{u}<br>
{p1}<br>
{p2}<br>
{p3}<br><br>
2m timeout<br>
for r3: use ooba, turn off streaming
</body></html>
""".strip()
    return Response(s, mimetype="text/html")


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080)
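for reference, the ooba-style route (the one behind r5) expects a payload shaped like this; the actual path comes out of DATA, so the url below is a placeholder and the sampler values are just illustrative:

import requests

url = "http://127.0.0.1:8080/<r5 route from DATA>"  # placeholder path

payload = {
    "prompt": "Hello,",
    "max_new_tokens": 64,
    "repetition_penalty": 1.1,
    "stopping_strings": ["\n"],
    "temperature": 0.7,
    "top_k": 40,
    "top_p": 0.9,
    "typical_p": 1.0,
    "seed": -1,        # -1 is treated as "no fixed seed"
    "stream": False,   # streaming and echo are rejected with a 400
}
print(requests.post(url, json=payload).json())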