fix: typos.
AsakusaRinne committed Apr 29, 2024
1 parent de31a06 commit 495177f
Showing 32 changed files with 43 additions and 43 deletions.
2 changes: 1 addition & 1 deletion LLama.Examples/Examples/BatchedExecutorSaveAndLoad.cs
@@ -80,7 +80,7 @@ public static async Task Run()
// Continue generating text
await GenerateTokens(executor, conversation, sampler, decoder, n_len);

// Display final ouput
// Display final output
AnsiConsole.MarkupLine($"[red]{prompt}{decoder.Read()}[/]");
}

2 changes: 1 addition & 1 deletion LLama.Examples/Examples/LlavaInteractiveModeExecute.cs
@@ -95,7 +95,7 @@ public static async Task Run()
Console.WriteLine();


// Initilize Images in executor
// Initialize Images in executor
//
foreach (var image in imagePaths)
{
4 changes: 2 additions & 2 deletions LLama.Examples/Examples/SpeechChat.cs
@@ -124,7 +124,7 @@ The short audio comes from a user that is speaking to an AI Language Model in re
int totalNonBlankClips; // ..but for example's sake they work on a
int nonIdleTime; // ..clip-based quant-length (1 = clipLength).
// Default detection settings: A speech of 750ms, followed by pause of 500ms. (2x250ms)
public (int minBlanksPerSeperation, int minNonBlanksForValidMessages) detectionSettings = (2, 3);
public (int minBlanksPerSeparation, int minNonBlanksForValidMessages) detectionSettings = (2, 3);

public HashSet<ISpeechListener> ServiceUsers = [];

@@ -156,7 +156,7 @@ void OnAudioDataAvailable(object? sender, WaveInEventArgs e)

// Compare the volume with the threshold and act accordingly. Once an interesting and 'full' set of clips pops up, serve it.
if (maxVolume >= voiceDetectionThreshold) { currentBlankClips = 0; totalNonBlankClips++; nonIdleTime++; }
else if (++currentBlankClips < detectionSettings.minBlanksPerSeperation) { nonIdleTime++; }
else if (++currentBlankClips < detectionSettings.minBlanksPerSeparation) { nonIdleTime++; }
else
{
if (totalNonBlankClips >= detectionSettings.minNonBlanksForValidMessages) { SendTranscription(); }
2 changes: 1 addition & 1 deletion LLama.Web/Async/AsyncLock.cs
@@ -1,7 +1,7 @@
namespace LLama.Web.Async
{
/// <summary>
/// Create an Async locking using statment
/// Create an Async locking using statement
/// </summary>
public sealed class AsyncLock
{
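For context, the doc comment fixed here describes an async lock that is held via a `using` statement. A minimal sketch of that pattern (a generic illustration, not the actual LLama.Web.Async.AsyncLock implementation) looks like this:

```cs
// Minimal sketch of an awaitable lock released by a using statement.
// Generic illustration only; the real LLama.Web.Async.AsyncLock may differ.
using System;
using System.Threading;
using System.Threading.Tasks;

public sealed class AsyncLock
{
    private readonly SemaphoreSlim _semaphore = new(1, 1);

    // Usage: using (await myLock.LockAsync()) { /* critical section */ }
    public async Task<IDisposable> LockAsync(CancellationToken cancellationToken = default)
    {
        await _semaphore.WaitAsync(cancellationToken).ConfigureAwait(false);
        return new Releaser(_semaphore);
    }

    private sealed class Releaser : IDisposable
    {
        private readonly SemaphoreSlim _semaphore;
        public Releaser(SemaphoreSlim semaphore) => _semaphore = semaphore;
        public void Dispose() => _semaphore.Release();
    }
}
```

The caller awaits `LockAsync()`, and disposing the returned handle at the end of the `using` block releases the semaphore.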
6 changes: 3 additions & 3 deletions LLama.Web/Extensions.cs
@@ -34,14 +34,14 @@ public static List<string> GetOutputFilters(this ISessionConfig sessionConfig)
private static List<string> CombineCSV(List<string> list, string csv)
{
var results = list is null || list.Count == 0
? CommaSeperatedToList(csv)
: CommaSeperatedToList(csv).Concat(list);
? CommaSeparatedToList(csv)
: CommaSeparatedToList(csv).Concat(list);
return results
.Distinct()
.ToList();
}

private static List<string> CommaSeperatedToList(string value)
private static List<string> CommaSeparatedToList(string value)
{
if (string.IsNullOrEmpty(value))
return new List<string>();
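For readers unfamiliar with the helper renamed in this hunk, a comma-separated-to-list conversion typically amounts to the sketch below. Only the `string.IsNullOrEmpty` guard comes from the file itself; the class name `CsvHelper` and the split/trim body are hypothetical:

```cs
// Hypothetical sketch of a comma-separated-to-list helper; only the
// string.IsNullOrEmpty guard is taken from the file shown above.
using System;
using System.Collections.Generic;
using System.Linq;

internal static class CsvHelper
{
    public static List<string> CommaSeparatedToList(string value)
    {
        if (string.IsNullOrEmpty(value))
            return new List<string>();

        return value.Split(',', StringSplitOptions.RemoveEmptyEntries)
                    .Select(part => part.Trim())
                    .ToList();
    }
}
```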
2 changes: 1 addition & 1 deletion LLama.Web/Hubs/SessionConnectionHub.cs
@@ -30,7 +30,7 @@ public override async Task OnDisconnectedAsync(Exception exception)
{
_logger.Log(LogLevel.Information, "[OnDisconnectedAsync], Id: {0}", Context.ConnectionId);

// Remove connections session on dissconnect
// Remove connections session on disconnect
await _modelSessionService.CloseAsync(Context.ConnectionId);
await base.OnDisconnectedAsync(exception);
}
6 changes: 3 additions & 3 deletions LLama.Web/README.md
@@ -1,8 +1,8 @@
## LLama.Web - Basic ASP.NET Core examples of LLamaSharp in action
LLama.Web has no heavy dependencies and no extra frameworks ove bootstrap and jquery to keep the examples clean and easy to copy over to your own project
LLama.Web has no heavy dependencies and no extra frameworks over bootstrap and jquery to keep the examples clean and easy to copy over to your own project

## Websockets
Using signalr websockets simplifys the streaming of responses and model per connection management
Using signalr websockets simplifies the streaming of responses and model per connection management



@@ -23,7 +23,7 @@ Example:
{
"Name": "Alpaca",
"Path": "D:\\Repositories\\AI\\Prompts\\alpaca.txt",
"Prompt": "Alternativly to can set a prompt text directly and omit the Path"
"Prompt": "Alternatively to can set a prompt text directly and omit the Path"
"AntiPrompt": [
"User:"
],
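The "SignalR websockets" sentence above refers to streaming model output to the browser on a per-connection basis. A generic ASP.NET Core hub sketch (not the actual LLama.Web SessionConnectionHub; the hub and method names here are invented) shows the shape of that approach:

```cs
// Generic ASP.NET Core SignalR hub sketch. LLama.Web's real hub ties a model
// session to Context.ConnectionId and streams generated tokens; this stub only
// echoes the prompt back to the caller to show the call shape.
using System;
using System.Threading.Tasks;
using Microsoft.AspNetCore.SignalR;

public class SessionHubSketch : Hub
{
    public async Task SendPrompt(string prompt)
    {
        // A real implementation would push tokens to the caller as they are generated.
        await Clients.Caller.SendAsync("OnResponse", $"echo: {prompt}");
    }

    public override async Task OnDisconnectedAsync(Exception? exception)
    {
        // Close the per-connection model session here, as the hub change above does.
        await base.OnDisconnectedAsync(exception);
    }
}
```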
2 changes: 1 addition & 1 deletion LLama.Web/Services/ModelService.cs
@@ -8,7 +8,7 @@ namespace LLama.Web.Services
{

/// <summary>
/// Sercive for handling Models,Weights & Contexts
/// Service for handling Models,Weights & Contexts
/// </summary>
public class ModelService : IModelService
{
2 changes: 1 addition & 1 deletion LLama/Abstractions/IInferenceParams.cs
@@ -6,7 +6,7 @@
namespace LLama.Abstractions
{
/// <summary>
/// The paramters used for inference.
/// The parameters used for inference.
/// </summary>
public interface IInferenceParams
{
2 changes: 1 addition & 1 deletion LLama/Abstractions/ILLamaExecutor.cs
@@ -20,7 +20,7 @@ public interface ILLamaExecutor
/// </summary>
public bool IsMultiModal { get; }
/// <summary>
/// Muti-Modal Projections / Clip Model weights
/// Multi-Modal Projections / Clip Model weights
/// </summary>
public LLavaWeights? ClipModel { get; }

2 changes: 1 addition & 1 deletion LLama/Abstractions/IModelParams.cs
@@ -232,7 +232,7 @@ public override void Write(Utf8JsonWriter writer, TensorSplitsCollection value,
public sealed record MetadataOverride
{
/// <summary>
/// Get the key being overriden by this override
/// Get the key being overridden by this override
/// </summary>
public string Key { get; }

2 changes: 1 addition & 1 deletion LLama/ChatSession.cs
@@ -545,7 +545,7 @@ public async IAsyncEnumerable<string> RegenerateAssistantMessageAsync(
InferenceParams? inferenceParams = null,
[EnumeratorCancellation] CancellationToken cancellationToken = default)
{
// Make sure the last message is an assistant message (reponse from the LLM).
// Make sure the last message is an assistant message (response from the LLM).
ChatHistory.Message? lastAssistantMessage = History.Messages.LastOrDefault();

if (lastAssistantMessage is null
2 changes: 1 addition & 1 deletion LLama/Common/InferenceParams.cs
@@ -7,7 +7,7 @@
namespace LLama.Common
{
/// <summary>
/// The paramters used for inference.
/// The parameters used for inference.
/// </summary>
public record InferenceParams
: IInferenceParams
2 changes: 1 addition & 1 deletion LLama/Extensions/IContextParamsExtensions.cs
@@ -6,7 +6,7 @@
namespace LLama.Extensions
{
/// <summary>
/// Extention methods to the IContextParams interface
/// Extension methods to the IContextParams interface
/// </summary>
public static class IContextParamsExtensions
{
2 changes: 1 addition & 1 deletion LLama/Extensions/IModelParamsExtensions.cs
@@ -7,7 +7,7 @@
namespace LLama.Extensions;

/// <summary>
/// Extention methods to the IModelParams interface
/// Extension methods to the IModelParams interface
/// </summary>
public static class IModelParamsExtensions
{
2 changes: 1 addition & 1 deletion LLama/LLamaContext.cs
@@ -628,7 +628,7 @@ protected override bool ReleaseHandle()
}

/// <summary>
/// Copy bytes to a desintation pointer.
/// Copy bytes to a destination pointer.
/// </summary>
/// <param name="dst">Destination to write to</param>
/// <param name="length">Length of the destination buffer</param>
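As background for the doc comment fixed here, copying bytes to a destination pointer is conventionally a bounds check followed by `Buffer.MemoryCopy`. The sketch below is illustrative only and is not the LLamaContext.cs method body:

```cs
// Generic sketch of copying managed bytes to an unmanaged destination pointer.
// Illustrative only; not the actual LLamaContext.cs implementation.
using System;

internal static unsafe class PointerCopy
{
    public static void CopyTo(ReadOnlySpan<byte> source, byte* dst, long destinationLength)
    {
        if (source.Length > destinationLength)
            throw new ArgumentException("Destination buffer is too small.", nameof(destinationLength));

        fixed (byte* src = source)
        {
            // Arguments: source, destination, destination size, bytes to copy.
            Buffer.MemoryCopy(src, dst, destinationLength, source.Length);
        }
    }
}
```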
2 changes: 1 addition & 1 deletion LLama/LLamaExecutorBase.cs
@@ -209,7 +209,7 @@ protected virtual void HandleRunOutOfContext(int tokensToKeep)
/// <summary>
/// Try to reuse the matching prefix from the session file.
/// </summary>
protected virtual void TryReuseMathingPrefix()
protected virtual void TryReuseMatchingPrefix()
{
if (_n_session_consumed < _session_tokens.Count)
{
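For readers wondering what the renamed `TryReuseMatchingPrefix` does conceptually: it skips re-decoding prompt tokens that already match the cached session tokens. A conceptual sketch of that prefix matching (not the executor's actual code; the helper name is invented) is:

```cs
// Conceptual sketch of matching-prefix reuse: count how many leading tokens of the
// new prompt already match the cached session tokens, so those positions do not
// need to be decoded again. Not the actual LLamaExecutorBase implementation.
using System.Collections.Generic;

internal static class PrefixReuse
{
    public static int CountReusablePrefix(IReadOnlyList<int> sessionTokens, IReadOnlyList<int> promptTokens)
    {
        var matched = 0;
        while (matched < sessionTokens.Count
               && matched < promptTokens.Count
               && sessionTokens[matched] == promptTokens[matched])
        {
            matched++;
        }
        return matched;
    }
}
```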
4 changes: 2 additions & 2 deletions LLama/LLamaInstructExecutor.cs
@@ -189,7 +189,7 @@ protected override Task InferInternal(IInferenceParams inferenceParams, InferSta
HandleRunOutOfContext(inferenceParams.TokensKeep);
}

TryReuseMathingPrefix();
TryReuseMatchingPrefix();

var (result, _) = Context.NativeHandle.Decode(_embeds, LLamaSeqId.Zero, batch, ref _pastTokensCount);
if (result != DecodeResult.Ok)
@@ -259,7 +259,7 @@ protected override Task InferInternal(IInferenceParams inferenceParams, InferSta
return Task.CompletedTask;
}
/// <summary>
/// The desciptor of the state of the instruct executor.
/// The descriptor of the state of the instruct executor.
/// </summary>
public class InstructExecutorState : ExecutorBaseState
{
2 changes: 1 addition & 1 deletion LLama/LLamaInteractExecutor.cs
@@ -234,7 +234,7 @@ protected override Task InferInternal(IInferenceParams inferenceParams, InferSta
HandleRunOutOfContext(inferenceParams.TokensKeep);
}

TryReuseMathingPrefix();
TryReuseMatchingPrefix();

// Changes to support Multi-Modal LLMs.
//
2 changes: 1 addition & 1 deletion LLama/LLamaStatelessExecutor.cs
@@ -63,7 +63,7 @@ public StatelessExecutor(LLamaWeights weights, IContextParams @params, ILogger?
/// <inheritdoc />
public async IAsyncEnumerable<string> InferAsync(string prompt, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
{
// Ensure the context from last time is disposed (it always hould be)
// Ensure the context from last time is disposed (it always should be)
if (!Context.NativeHandle.IsClosed)
Context.Dispose();

4 changes: 2 additions & 2 deletions LLama/Native/NativeApi.LLava.cs
@@ -21,7 +21,7 @@ public static unsafe partial class NativeApi
/// <param name="ctx_clip">SafeHandle to the Clip Model</param>
/// <param name="n_threads">Number of threads</param>
/// <param name="image_bytes">Binary image in jpeg format</param>
/// <param name="image_bytes_length">Bytes lenght of the image</param>
/// <param name="image_bytes_length">Bytes length of the image</param>
/// <returns>SafeHandle to the Embeddings</returns>
[DllImport(llavaLibraryName, EntryPoint = "llava_image_embed_make_with_bytes",
CallingConvention = CallingConvention.Cdecl)]
@@ -35,7 +35,7 @@ SafeLlavaImageEmbedHandle llava_image_embed_make_with_bytes(SafeLlavaModelHandle
/// <param name="ctx_clip">SafeHandle to the Clip Model</param>
/// <param name="n_threads">Number of threads</param>
/// <param name="image_path">Image filename (jpeg) to generate embeddings</param>
/// <returns>SafeHandel to the embeddings</returns>
/// <returns>SafeHandle to the embeddings</returns>
[DllImport(llavaLibraryName, EntryPoint = "llava_image_embed_make_with_filename", CallingConvention = CallingConvention.Cdecl)]
public static extern
SafeLlavaImageEmbedHandle llava_image_embed_make_with_filename(SafeLlavaModelHandle ctx_clip, int n_threads,
2 changes: 1 addition & 1 deletion LLama/Native/NativeApi.Load.cs
@@ -34,7 +34,7 @@ static NativeApi()
"3. One of the dependency of the native library is missed. Please use `ldd` on linux, `dumpbin` on windows and `otool`" +
"to check if all the dependency of the native library is satisfied. Generally you could find the libraries under your output folder.\n" +
"4. Try to compile llama.cpp yourself to generate a libllama library, then use `LLama.Native.NativeLibraryConfig.WithLibrary` " +
"to specify it at the very beginning of your code. For more informations about compilation, please refer to LLamaSharp repo on github.\n");
"to specify it at the very beginning of your code. For more information about compilation, please refer to LLamaSharp repo on github.\n");
}

// Now that the "loaded" flag is set configure logging in llama.cpp
4 changes: 2 additions & 2 deletions LLama/Native/NativeLibraryConfig.cs
@@ -101,7 +101,7 @@ public NativeLibraryConfig SkipCheck(bool enable = true)
}

/// <summary>
/// Add self-defined search directories. Note that the file stucture of the added
/// Add self-defined search directories. Note that the file structure of the added
/// directories must be the same as the default directory. Besides, the directory
/// won't be used recursively.
/// </summary>
@@ -116,7 +116,7 @@ public NativeLibraryConfig WithSearchDirectories(IEnumerable<string> directories
}

/// <summary>
/// Add self-defined search directories. Note that the file stucture of the added
/// Add self-defined search directories. Note that the file structure of the added
/// directories must be the same as the default directory. Besides, the directory
/// won't be used recursively.
/// </summary>
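A hedged usage sketch for the `WithSearchDirectories` method documented above, combined with the `WithLogs` call quoted in the README and FAQ changes below. The chaining relies on the `NativeLibraryConfig` return type visible in this hunk; the directory path is a placeholder and argument details may vary between LLamaSharp versions:

```cs
// Hedged usage sketch: configure native library resolution before any LLamaSharp
// model or context is created. The directory path is a placeholder.
using LLama.Native;

NativeLibraryConfig.Instance
    .WithSearchDirectories(new[] { "./runtimes/self-compiled" })
    .WithLogs(LLamaLogLevel.Info);
```

As the loader's error message in NativeApi.Load.cs notes, this configuration belongs at the very beginning of your code, before the native library is loaded.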
2 changes: 1 addition & 1 deletion README.md
@@ -175,7 +175,7 @@ For more examples, please refer to [LLamaSharp.Examples](./LLama.Examples).
#### Why GPU is not used when I have installed CUDA

1. If you are using backend packages, please make sure you have installed the cuda backend package which matches the cuda version of your device. Please note that before LLamaSharp v0.10.0, only one backend package should be installed.
2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA libraty is loaded, please check if `GpuLayerCount > 0` when loading the model weight.
2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA library is loaded, please check if `GpuLayerCount > 0` when loading the model weight.

#### Why the inference is slow

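The `GpuLayerCount > 0` check mentioned in this FAQ entry refers to the model-loading parameters. A hedged sketch follows; the model path is a placeholder, and property and method names follow common LLamaSharp examples, so they may differ slightly by version:

```cs
// Hedged sketch of loading weights with layers offloaded to the GPU.
// The model path is a placeholder.
using LLama;
using LLama.Common;

var parameters = new ModelParams("path/to/model.gguf")
{
    GpuLayerCount = 32   // must be > 0 for the CUDA backend to do useful work
};
using var weights = LLamaWeights.LoadFromFile(parameters);
```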
2 changes: 1 addition & 1 deletion docs/Examples/LLavaInteractiveModeExecute.md
@@ -98,7 +98,7 @@ namespace LLama.Examples.Examples
Console.WriteLine();


// Initilize Images in executor
// Initialize Images in executor
//
foreach (var image in imagePaths)
{
4 changes: 2 additions & 2 deletions docs/FAQ.md
@@ -1,11 +1,11 @@
# Frequently asked qustions
# Frequently asked questions

Sometimes, your application with LLM and LLamaSharp may have unexpected behaviours. Here are some frequently asked questions, which may help you to deal with your problem.

## Why GPU is not used when I have installed CUDA

1. If you are using backend packages, please make sure you have installed the cuda backend package which matches the cuda version of your device. Please note that before LLamaSharp v0.10.0, only one backend package should be installed.
2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA libraty is loaded, please check if `GpuLayerCount > 0` when loading the model weight.
2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA library is loaded, please check if `GpuLayerCount > 0` when loading the model weight.

## Why the inference is slow

2 changes: 1 addition & 1 deletion docs/QuickStart.md
@@ -169,7 +169,7 @@ do
Console.WriteLine();


// Initilize Images in executor
// Initialize Images in executor
//
ex.ImagePaths = imagePaths.ToList();
}
6 changes: 3 additions & 3 deletions docs/Tutorials/Executors.md
@@ -23,7 +23,7 @@ public interface ILLamaExecutor
/// </summary>
public bool IsMultiModal { get; }
/// <summary>
/// Muti-Modal Projections / Clip Model weights
/// Multi-Modal Projections / Clip Model weights
/// </summary>
public LLavaWeights? ClipModel { get; }

@@ -110,7 +110,7 @@ At this time, by repeating the same mode of `Q: xxx? A: xxx.`, LLM outputs the a

## BatchedExecutor

Different from other executors, `BatchedExecutor` could accept multiple inputs from different sessions and geneate outputs for them at the same time. Here is an example to use it.
Different from other executors, `BatchedExecutor` could accept multiple inputs from different sessions and generate outputs for them at the same time. Here is an example to use it.

```cs
using LLama.Batched;
@@ -249,7 +249,7 @@ Here is the parameters for LLamaSharp executors.

```cs
/// <summary>
/// The paramters used for inference.
/// The parameters used for inference.
/// </summary>
public record InferenceParams
: IInferenceParams
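Since this hunk touches the `InferenceParams` doc shown in the executors tutorial, here is a hedged end-to-end sketch of passing inference parameters to an executor. The model path and values are placeholders; the `StatelessExecutor` constructor shape matches the signature visible in the LLamaStatelessExecutor.cs hunk above, and the other names follow common LLamaSharp examples:

```cs
// Hedged sketch: wiring InferenceParams into an executor's InferAsync stream.
// The model path and parameter values are placeholders.
using System;
using System.Collections.Generic;
using LLama;
using LLama.Common;

var modelParams = new ModelParams("path/to/model.gguf") { ContextSize = 2048 };
using var weights = LLamaWeights.LoadFromFile(modelParams);

var executor = new StatelessExecutor(weights, modelParams);

var inferenceParams = new InferenceParams
{
    MaxTokens = 128,                              // stop after 128 generated tokens
    AntiPrompts = new List<string> { "User:" }    // stop when the model emits "User:"
};

await foreach (var token in executor.InferAsync("Q: What is an executor? A:", inferenceParams))
    Console.Write(token);
```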
2 changes: 1 addition & 1 deletion docs/Tutorials/NativeLibraryConfig.md
@@ -8,7 +8,7 @@ As indicated in [Architecture](../Architecture.md), LLamaSharp uses the native l
Before introducing the way to customize native library loading, please follow the tips below to see if you need to compile the native library yourself, rather than use the published backend packages, which contain native library files for multiple targets.

1. Your device/environment has not been supported by any published backend packages. For example, vulkan has not been supported yet. In this case, it will mean a lot to open an issue to tell us you are using it. Since our support for new backend will have a delay, you could compile yourself before that.
2. You want to gain the best performance of LLamaSharp. Because LLamaSharp offloads the model to both GPU and CPU, the performance is significantly related with CPU if your GPU memory size is small. AVX ([Advanced Vector Extensions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions)) and BLAS ([Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms)) are the most important ways to accelerate the CPU computation. By default, LLamaSharp disables the support for BLAS and use AVX2 for CUDA backend yet. If you would like to enable BLAS or use AVX 512 along with CUDA, please compile the native library youself, following the [instructions here](../ContributingGuide.md).
2. You want to gain the best performance of LLamaSharp. Because LLamaSharp offloads the model to both GPU and CPU, the performance is significantly related with CPU if your GPU memory size is small. AVX ([Advanced Vector Extensions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions)) and BLAS ([Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms)) are the most important ways to accelerate the CPU computation. By default, LLamaSharp disables the support for BLAS and use AVX2 for CUDA backend yet. If you would like to enable BLAS or use AVX 512 along with CUDA, please compile the native library yourself, following the [instructions here](../ContributingGuide.md).
3. You want to debug the c++ code.

