Update README.md
updated model card

README.md

Addressing the power of LLMs in fine-tuned downstream tasks.

### How to use

Pass the natural-language question together with the table schema as a single prompt, using the `sql_prompt:` and `sql_context:` fields:

```python
query_question_with_context = """sql_prompt: Which economic diversification efforts in
the 'diversification' table have a higher budget than the average budget for all economic diversification efforts in the 'budget' table?
sql_context: CREATE TABLE diversification (id INT, effort VARCHAR(50), budget FLOAT); CREATE TABLE
budget (diversification_id INT, diversification_effort VARCHAR(50), amount FLOAT);"""
```
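
For other questions and schemas, the same prompt layout can be assembled programmatically. The sketch below assumes a hypothetical `build_prompt` helper that is not part of the model card; only the `sql_prompt:`/`sql_context:` layout itself comes from the example above.

```python
# Hypothetical helper (not from the model card): assembles a question and a
# schema into the "sql_prompt: ... sql_context: ..." layout shown above.
def build_prompt(question: str, schema: str) -> str:
    return f"sql_prompt: {question}\nsql_context: {schema}"

query_question_with_context = build_prompt(
    "Which economic diversification efforts in the 'diversification' table have a higher budget "
    "than the average budget for all economic diversification efforts in the 'budget' table?",
    "CREATE TABLE diversification (id INT, effort VARCHAR(50), budget FLOAT); "
    "CREATE TABLE budget (diversification_id INT, diversification_effort VARCHAR(50), amount FLOAT);",
)
```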

```python
# Use a pipeline as a high-level helper
from transformers import pipeline

sql_generator = pipeline("text2text-generation", model="SwastikM/bart-large-nl2sql")

sql = sql_generator(query_question_with_context)[0]['generated_text']
print(sql)
```
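
Generation settings can also be forwarded through the pipeline call. The specific values below are illustrative assumptions, not settings recommended in the model card.

```python
# Forward generation settings through the pipeline call; the particular
# max_new_tokens / num_beams values are illustrative, not tuned.
result = sql_generator(query_question_with_context, max_new_tokens=100, num_beams=4)
print(result[0]["generated_text"])
```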

```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("SwastikM/bart-large-nl2sql")
model = AutoModelForSeq2SeqLM.from_pretrained("SwastikM/bart-large-nl2sql")

inputs = tokenizer(query_question_with_context, return_tensors="pt").input_ids
outputs = model.generate(inputs, max_new_tokens=100, do_sample=False)

sql = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(sql)
```
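
If a GPU is available, the direct-loading variant can run on it. This is an optional sketch, not part of the original model card; it reuses the `max_new_tokens=100, do_sample=False` settings from the example above.

```python
# Optional GPU variant (illustrative sketch, not from the model card).
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("SwastikM/bart-large-nl2sql")
model = AutoModelForSeq2SeqLM.from_pretrained("SwastikM/bart-large-nl2sql").to(device)

inputs = tokenizer(query_question_with_context, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=100, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```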