mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2024-10-01 01:06:10 -04:00
Make the sample print usage information and clean up the code
This commit is contained in:
parent
dec8546abe
commit
63f57635d8
@ -1,21 +1,22 @@
|
|||||||
using Gpt4All;

// Sample console app: load a GPT4All model from disk and stream a prediction
// for a user-supplied prompt, writing tokens to stdout as they arrive.
//
// Usage: Gpt4All.Samples <model-path> <prompt>

var modelFactory = new Gpt4AllModelFactory();

// Require both arguments; print usage and exit cleanly otherwise.
if (args.Length < 2)
{
    // Plain string literal: no interpolation holes, so the `$` prefix was redundant.
    Console.WriteLine("Usage: Gpt4All.Samples <model-path> <prompt>");
    return;
}

var modelPath = args[0];
var prompt = args[1];

// `using var` disposes the native model handle when the program exits this scope.
using var model = modelFactory.LoadModel(modelPath);

var result = await model.GetStreamingPredictionAsync(
    prompt,
    PredictRequestOptions.Defaults);

// Stream tokens as they are produced rather than waiting for the full completion.
await foreach (var token in result.GetPredictionStreamingAsync())
{
    Console.Write(token);
}

Console.WriteLine();
Console.WriteLine("DONE.");
Loading…
Reference in New Issue
Block a user