Improve readme, move doc/ and examples/, and fix more of the main repl logic

pancake 2023-10-19 15:28:48 +02:00
parent 20385ae061
commit f287830583
4 changed files with 32 additions and 13 deletions

README.md

@@ -8,17 +8,34 @@
```
-Run r2ai in local, no google bard or chatgpt. Just use your CPU/GPU/NPU and interact with r2 using natural language.
+Run r2ai locally, without internet access or leaking any data. Use your CPU/GPU/NPU and interact with r2 using natural language.
+The current implementation is based on `llama-cpp` and the default model is `CodeLlama-CherryPop`.
--pancake
## Installation
```
-pip3 install rich inquirer python-dotenv openai litellm tokentrim
+pip3 install rich inquirer llama-cpp tokentrim
r2pm -i r2ai
```
+## Execution
+There are 4 different ways to run `r2ai`:
+* Standalone and interactive: `r2pm -r r2ai`
+* Batch mode: `r2ai '-r act as a calculator' '3+3=?'`
+* From radare2 (requires `r2pm -ci rlang-python`): `r2 -c 'r2ai -h'`
+* Using r2pipe: `#!pipe python main.py`
+## Scripting
+You can interact with r2ai from standalone Python, from r2pipe via r2 (keeping a global state, as sketched below), or using the JavaScript interpreter embedded inside `radare2`.
+* [examples/conversation.r2.js](conversation.r2.js) - load two models and make them talk to each other
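As a rough sketch of the r2pipe route mentioned above, the snippet below runs from inside radare2 via `#!pipe python script.py`; the script name and the `pdf` command are illustrative, while `r2pipe.open()` and `cmd()` are the actual r2pipe API:

```python
# script.py (hypothetical name); run it from inside radare2 with: #!pipe python script.py
# radare2 sets R2PIPE_IN for the child process, so r2pipe.open() with no
# arguments attaches to the already-running session instead of opening a file.
import r2pipe

r2 = r2pipe.open()
# Ask the running session for the disassembly of the current function;
# main.py wraps output like this into the prompt it sends to the local model.
print(r2.cmd("pdf"))
```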
## Development/Testing
Just run `make` .. or well `python main.py /path/to/file`

main.py

@@ -9,7 +9,6 @@ try:
except:
    pass
-import builtins
import traceback
import r2ai
from r2ai.utils import slurp
@@ -17,6 +16,8 @@ from r2ai.utils import slurp
r2 = None
have_rlang = False
have_r2pipe = False
+within_r2 = False
try:
    import r2lang
    have_rlang = True
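For context around this hunk, the detection logic in main.py amounts to two guarded imports plus the new `within_r2` flag; a condensed sketch, assuming this nesting (the real file may structure the try/except slightly differently):

```python
import os

have_rlang = False   # True when running as an rlang-python plugin inside radare2
have_r2pipe = False  # True when the r2pipe module can be imported
within_r2 = False    # True when radare2 spawned this process (e.g. via '#!pipe')

try:
    import r2lang
    have_rlang = True
except ImportError:
    try:
        import r2pipe
        have_r2pipe = True
    except ImportError:
        pass

# Set further down (see the r2pipe hunk below): R2PIPE_IN in the environment
# means radare2 launched us, so r2pipe.open() attaches to that session.
if have_r2pipe and "R2PIPE_IN" in os.environ:
    within_r2 = True
```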
@@ -71,7 +72,7 @@ ai.model = "TheBloke/llama2-7b-chat-codeCherryPop-qLoRA-GGUF"
# ai.model = "models/models/mistral-7b-v0.1.Q4_K_M.gguf"
#interpreter.model = "models/models/mistral-7b-instruct-v0.1.Q2_K.gguf"
#interpreter.model = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF"
-#builtins.print("TheBloke/Mistral-7B-Instruct-v0.1-GGUF")
+#print("TheBloke/Mistral-7B-Instruct-v0.1-GGUF")
dir_path = os.path.dirname(os.path.realpath(__file__))
model_path = dir_path + "/" + ai.model
@@ -103,9 +104,9 @@ def runline(usertext):
    if usertext == "":
        return
    if usertext.startswith("?") or usertext.startswith("-h"):
-        builtins.print(help_message)
+        print(help_message)
    elif usertext.startswith("clear") or usertext.startswith("-k"):
-        builtins.print("\x1b[2J\x1b[0;0H\r")
+        print("\x1b[2J\x1b[0;0H\r")
    elif usertext.startswith("-M"):
        r2ai.models()
    elif usertext.startswith("-m"):
@@ -113,7 +114,7 @@ def runline(usertext):
        if len(words) > 1:
            ai.model = words[1]
        else:
-            builtins.print(ai.model)
+            print(ai.model)
    elif usertext == "reset" or usertext.startswith("-R"):
        ai.reset()
    elif usertext == "-q" or usertext == "exit":
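To make the flag dispatch above concrete, these are the kinds of command strings the REPL hands to `runline()`; the calls below are illustrative and assume main.py's namespace rather than a separate API:

```python
# Typed at the r2ai prompt (or passed as CLI arguments), each line ends up in runline().
runline("-h")   # print the help message
runline("-M")   # list the available models
runline("-m TheBloke/llama2-7b-chat-codeCherryPop-qLoRA-GGUF")  # pick a model
runline("-R")   # reset the conversation (same as typing 'reset')
runline("-q")   # quit (same as typing 'exit')
```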
@@ -189,19 +190,19 @@ def runline(usertext):
            que = words[1]
        else:
            que = input("[Query]>> ")
-        tag = "CODE" # CODE, TEXT, ..
+        tag = "CODE" # TEXT, ..
        ai.chat("Human: " + que + ":\n[" + tag + "]\n" + res + "\n[/" + tag + "]\n")
    elif usertext[0] == "!": # Deprecate. we have -c now
        if r2 is None:
-            builtins.print("r2 is not available")
+            print("r2 is not available")
        elif usertext[1] == "!":
            res = r2_cmd(usertext[2:])
            que = input("[Query]>> ")
            ai.chat("Q: " + que + ":\n[INPUT]\n"+ res+"\n[/INPUT]\n") # , return_messages=True)
        else:
-            builtins.print(r2_cmd(usertext[1:]))
+            print(r2_cmd(usertext[1:]))
    elif usertext.startswith("-"):
-        builtins.print("Unknown flag. See 'r2ai -h' for help")
+        print("Unknown flag. See 'r2ai -h' for help")
    else:
        ai.chat(usertext)
        # r2ai.load(res)
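The `ai.chat()` calls in this hunk build prompts the same way: the user question first, then the radare2 output fenced between bracketed tags such as [CODE] or [INPUT]. A standalone sketch of that wrapping, with an illustrative helper name:

```python
def wrap_query(question, r2_output, tag="CODE"):
    # Mirrors the layout used in runline(): the question, then the r2 output
    # enclosed in [TAG] ... [/TAG] markers so the model can tell them apart.
    return "Human: " + question + ":\n[" + tag + "]\n" + r2_output + "\n[/" + tag + "]\n"

# Example with a tiny fake disassembly; in main.py the output comes from r2_cmd().
prompt = wrap_query("what does this function do?", "push rbp; mov rbp, rsp; ret")
print(prompt)
```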
@@ -238,11 +239,12 @@ if have_r2pipe:
    try:
        if "R2PIPE_IN" in os.environ.keys():
            r2 = r2pipe.open()
+            within_r2 = True
        else:
            file = sys.argv[1] if len(sys.argv) > 1 else "/bin/ls"
            r2 = r2pipe.open(file)
    except:
-        print("error")
+        traceback.print_exc()

if have_rlang:
    def r2ai_rlang_plugin(a):
@@ -269,5 +271,5 @@ elif len(sys.argv) > 1:
    for arg in sys.argv[1:]:
        runline(arg)
    r2ai_repl()
-elif not have_r2pipe and "R2PIPE_IN" not in os.environ:
+elif not within_r2:
    r2ai_repl()
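Putting this hunk together with the previous one, the entry-point selection after the change looks roughly as follows; this is a condensed sketch, not a verbatim copy of main.py, and the plugin-registration call is assumed from how rlang-python core plugins are usually registered:

```python
import sys  # the other names (have_rlang, within_r2, runline, r2ai_repl, ...) come from main.py

if have_rlang:
    # Inside radare2 with rlang-python: expose the 'r2ai' command as a core plugin.
    r2lang.plugin("core", r2ai_rlang_plugin)
elif len(sys.argv) > 1:
    # Batch mode: run every CLI argument as one r2ai command line...
    for arg in sys.argv[1:]:
        runline(arg)
    r2ai_repl()  # ...then drop into the interactive REPL.
elif not within_r2:
    # Standalone invocation that was not spawned by radare2: plain interactive REPL.
    r2ai_repl()
```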