Small change to the comment about multi-GPU support
parent 08ffab941f
commit a3ba2275ed
@@ -7,7 +7,7 @@ from utils import load_model_on_gpus
 tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
 model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True, device='cuda')
-# Multi-GPU support: comment out the three lines below and change num_gpus to your actual number of GPUs
+# Multi-GPU support: use the three lines below in place of the two lines above, and change num_gpus to your actual number of GPUs
 # model_path = "THUDM/chatglm2-6b"
 # tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 # model = load_model_on_gpus(model_path, num_gpus=2)
@@ -4,7 +4,7 @@ import mdtex2html
 tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
 model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True, device='cuda')
-# Multi-GPU support: comment out the three lines below and change num_gpus to your actual number of GPUs
+# Multi-GPU support: use the three lines below in place of the two lines above, and change num_gpus to your actual number of GPUs
 # model_path = "THUDM/chatglm2-6b"
 # tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 # model = load_model_on_gpus(model_path, num_gpus=2)
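For reference, a minimal sketch of how the demos' loading code reads once the three commented lines replace the two single-GPU lines above them, as the new comment describes. It assumes the repository's utils.py (which defines load_model_on_gpus) is on the import path, and num_gpus=2 is only an example value to be set to the number of GPUs actually available:

from transformers import AutoTokenizer
from utils import load_model_on_gpus

model_path = "THUDM/chatglm2-6b"
# The tokenizer is loaded exactly as in the single-GPU path.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
# load_model_on_gpus spreads the model across the given number of GPUs
# instead of placing it on a single device with AutoModel.from_pretrained.
model = load_model_on_gpus(model_path, num_gpus=2)
model = model.eval()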