mirror of https://github.com/deepseek-ai/DeepSeek-Coder.git
synced 2025-06-20 00:14:03 -04:00

Update README.md

Fixes the Hugging Face namespace in every README example: "deepseek/deepseek-coder-7b-base" becomes "deepseek-ai/deepseek-coder-7b-base".

parent 1ab77f7208
commit c6ebdafb13

README.md: 16 changed lines (8 additions, 8 deletions)
@@ -51,8 +51,8 @@ Here give some examples of how to use our model.
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
-tokenizer = AutoTokenizer.from_pretrained("deepseek/deepseek-coder-7b-base", trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained("deepseek/deepseek-coder-7b-base", trust_remote_code=True).cuda()
+tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True).cuda()
 input_text = "#write a quick sort algorithm"
 inputs = tokenizer(input_text, return_tensors="pt").cuda()
 outputs = model.generate(**inputs, max_length=128)
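For reference, a minimal runnable version of the patched snippet is sketched below. One caveat the commit does not touch: `tokenizer(...)` returns a `BatchEncoding`, which has no `.cuda()` method, so the sketch moves the inputs with `.to(model.device)` instead; the final decode line is likewise an addition for illustration, not part of the diff.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Corrected Hugging Face namespace from this commit: deepseek-ai/...
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True).cuda()

input_text = "#write a quick sort algorithm"
# BatchEncoding has no .cuda(); move the input tensors to the model's device.
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```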
@@ -78,8 +78,8 @@ def quick_sort(arr):
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
-tokenizer = AutoTokenizer.from_pretrained("deepseek/deepseek-coder-7b-base", trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained("deepseek/deepseek-coder-7b-base", trust_remote_code=True).cuda()
+tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True).cuda()
 input_text = """<fim_prefix>def quick_sort(arr):
     if len(arr) <= 1:
         return arr
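This hunk shows only the prefix of the fill-in-the-middle prompt. A complete FIM call, assuming the StarCoder-style sentinels `<fim_prefix>`, `<fim_suffix>`, and `<fim_middle>` consistent with the `<fim_prefix>` token visible above (verify the exact token names against the model's tokenizer), might look like this; the suffix line is illustrative filler, not taken from the commit:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True).cuda()

# Prefix and suffix of the function; the model generates the missing middle.
# <fim_suffix>/<fim_middle> are assumed counterparts of <fim_prefix> above.
input_text = """<fim_prefix>def quick_sort(arr):
    if len(arr) <= 1:
        return arr
<fim_suffix>
    return quick_sort(left) + [pivot] + quick_sort(right)<fim_middle>"""

inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_length=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```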
@@ -103,8 +103,8 @@ This code will output the following result:
 #### 3)Repository Level Code Completion
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
-tokenizer = AutoTokenizer.from_pretrained("deepseek/deepseek-coder-7b-base", trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained("deepseek/deepseek-coder-7b-base", trust_remote_code=True).cuda()
+tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True).cuda()
 
 input_text = """#utils.py
 import torch
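The repository-level hunk cuts off after the `#utils.py` header. The idea is to concatenate several files into one prompt, each introduced by a `#<filename>` comment line, and let the model continue the last file. A self-contained sketch follows; everything in the prompt past `import torch` is hypothetical filler, not README content:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True).cuda()

# Two pseudo-files in one prompt; the model completes main.py using utils.py.
input_text = """#utils.py
import torch

def to_tensor(x):
    return torch.as_tensor(x, dtype=torch.float32)

#main.py
from utils import to_tensor

def normalize(values):
"""
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
# Print only the newly generated continuation, not the echoed prompt.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```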
@@ -193,8 +193,8 @@ In the following scenario, the Deepseek-Coder 7B model effectively calls a class
 #### 4)Chat Model Inference
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
-tokenizer = AutoTokenizer.from_pretrained("deepseek/deepseek-coder-7b-base", trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained("deepseek/deepseek-coder-7b-base", trust_remote_code=True).cuda()
+tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True).cuda()
 prompt = "write a quick sort algorithm in python."
 prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context.\nWrite a response that appropriately completes the request.\n\n### Instruction:\nWrite a program to perform the given task.\n\nInput:\n{prompt}\n\n### Response:\n"""
 inputs = tokenizer.encode(prompt, return_tensors="pt").cuda()
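Unlike the earlier snippets, `tokenizer.encode(..., return_tensors="pt")` returns a plain tensor, so the `.cuda()` call in this hunk is valid as written. Expanded into a runnable sketch (the Alpaca-style template is verbatim from the diff; the generation and decode lines are additions for illustration):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-7b-base", trust_remote_code=True).cuda()

task = "write a quick sort algorithm in python."
# Alpaca-style instruction template used by this revision of the README.
prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context.
Write a response that appropriately completes the request.

### Instruction:
Write a program to perform the given task.

Input:
{task}

### Response:
"""
# encode(..., return_tensors="pt") yields a tensor, so .cuda() works here.
inputs = tokenizer.encode(prompt, return_tensors="pt").cuda()
outputs = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True))
```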