import gradio as gr
import http.client
import ssl
import json
import warnings

warnings.filterwarnings("ignore")
def retrieve_api_key(url):
    """Fetch an API key from the host's /admin/api-keys/ endpoint."""
    context = ssl.create_default_context()
    context.check_hostname = True
    conn = http.client.HTTPSConnection(url, context=context)
    conn.request("GET", "/admin/api-keys/")
    api_key_response = conn.getresponse()
    api_keys_data = (
        api_key_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
    )
    api_keys_json = json.loads(api_keys_data)
    # Use the first key returned by the endpoint.
    api_key = api_keys_json[0]["api_key"]
    conn.close()
    return api_key
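# The parsing above assumes /admin/api-keys/ returns a JSON array of key records,
# roughly the illustrative shape below (inferred from api_keys_json[0]["api_key"];
# the real schema may carry additional fields):
#   [{"api_key": "abc123"}, {"api_key": "def456"}]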
def get_benchmark_uids(num_miner, mode):
    """Return the UIDs of the top `num_miner` miners for the given benchmark mode."""
    url = "test.neuralinternet.ai"
    api_key = retrieve_api_key(url)
    context = ssl.create_default_context()
    context.check_hostname = True
    conn = http.client.HTTPSConnection(url, context=context)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
        "Endpoint-Version": "2023-05-19",
    }
    conn.request("GET", f"/top_miner_uids?n={num_miner}&mode={mode}", headers=headers)
    miner_response = conn.getresponse()
    miner_data = (
        miner_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
    )
    uids = json.loads(miner_data)
    conn.close()
    return uids
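# This helper is expected to return a plain JSON list of miner UIDs, e.g. [975, 517, 906],
# which is passed straight through as payload["uids"] below; that shape is inferred from
# how the value is used, not from a published schema.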
def retrieve_response(payload):
    """POST the chat payload to the /chat endpoint and return each miner's reply keyed by UID."""
    url = "d509-65-108-32-175.ngrok-free.app"
    api_key = retrieve_api_key(url)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
        "Endpoint-Version": "2023-05-19",
    }
    payload = json.dumps(payload)
    context = ssl.create_default_context()
    context.check_hostname = True
    conn = http.client.HTTPSConnection(url, context=context)
    conn.request("POST", "/chat", payload, headers)
    init_response = conn.getresponse()
    init_data = init_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
    init_json = json.loads(init_data)
    conn.close()
    # Map each miner UID to the text content of its reply.
    response_dict = dict()
    for choice in init_json["choices"]:
        uid = choice["uid"]
        resp = choice["message"]["content"]
        resp = resp.replace("\n", "").replace("\t", "")
        response_dict[uid] = resp
    response_text = "\n\n".join(
        f'"{key}": "{value}"' for key, value in response_dict.items()
    )
    return response_text
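# Illustrative shape of the /chat response the loop above expects (inferred from the
# fields it reads; the actual schema may differ):
#   {"choices": [{"uid": 975, "message": {"role": "assistant", "content": "..."}}]}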
def interface_fn(system_prompt, optn, arg, user_prompt):
    """Build the request payload from the Gradio inputs and return the miners' responses."""
    if len(system_prompt) == 0:
        system_prompt = (
            "You are an AI Assistant, created by Bittensor and powered by NI (Neural Internet). "
            "Your task is to provide concise responses to the user's prompt."
        )
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    payload = dict()
    if optn == "TOP":
        # Query the top-n miners; cap n at 50.
        if int(arg) > 50:
            arg = 50
        payload["top_n"] = int(arg)
        payload["messages"] = messages
        return retrieve_response(payload)
    elif optn == "BENCHMARK_TextEval":
        # Query the top-n miners ranked by the TextEval benchmark; cap n at 50.
        if int(arg) > 50:
            arg = 50
        uids = get_benchmark_uids(int(arg), "TextEval")
        payload["uids"] = uids
        payload["messages"] = messages
        return retrieve_response(payload)
    elif optn == "BENCHMARK_AGIEval":
        # Query the top-n miners ranked by the AGIEval benchmark; cap n at 50.
        if int(arg) > 50:
            arg = 50
        uids = get_benchmark_uids(int(arg), "AGIEval")
        payload["uids"] = uids
        payload["messages"] = messages
        return retrieve_response(payload)
    else:
        # Query an explicit comma-separated list of miner UIDs.
        if "," in arg:
            uids = [int(x) for x in arg.split(",")]
        else:
            uids = [int(arg)]
        payload["uids"] = uids
        payload["messages"] = messages
        return retrieve_response(payload)
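# Example calls, for illustration only (they require the endpoints above to be reachable):
#   interface_fn("", "TOP", "5", "What is Bittensor?")
#   interface_fn("", "UIDs", "975,517", "What is Bittensor?")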
interface = gr.Interface(
    fn=interface_fn,
    inputs=[
        gr.Textbox(label="System Prompt (optional)"),
        gr.Dropdown(["TOP", "BENCHMARK_TextEval", "BENCHMARK_AGIEval", "UIDs"], label="Select Function"),
        gr.Textbox(label="Argument"),
        gr.Textbox(label="Enter your question"),
    ],
    outputs=gr.Textbox(label="Model Responses"),
    title="Explore Bittensor Miners",
    description="Enter the parameters you want and get the miners' responses.",
    examples=[
        ["Your task is to provide concise responses to user prompts", "TOP", 5, "What is Bittensor?"],
        ["Your task is to provide accurate, lengthy responses with good lexical flow", "BENCHMARK_TextEval", 5, "What is a neural network and how does its feeding mechanism work?"],
        ["Act like you have been in the technology field for 10+ years and give an unbiased opinion", "UIDs", "975,517,906,743,869", "What are the potential ethical concerns surrounding artificial intelligence and machine learning in healthcare?"],
    ],
)

interface.queue().launch()