Skip to content

Commit 52548bc

Browse files
feat: implement OpenAI provider with complete LLM interface
- Add OpenAiProvider class implementing ILlmProvider
- Support non-streaming and streaming completions
- Parse tool calls and function calling correctly
- Implement model listing and availability checks
- Add comprehensive unit tests (6 passing)
- Handle OpenAI API response format correctly

Task 2.2 complete (TDD: RED → GREEN phases)
1 parent 1ab0bf8 commit 52548bc

5 files changed

Lines changed: 318 additions & 0 deletions

File tree

ClawSharp.slnx

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
<Project Path="tests/ClawSharp.Cli.Tests/ClawSharp.Cli.Tests.csproj" />
1717
<Project Path="tests/ClawSharp.Core.Tests/ClawSharp.Core.Tests.csproj" />
1818
<Project Path="tests/ClawSharp.Infrastructure.Tests/ClawSharp.Infrastructure.Tests.csproj" />
19+
<Project Path="tests/ClawSharp.Providers.Tests/ClawSharp.Providers.Tests.csproj" />
1920
<Project Path="tests/ClawSharp.TestHelpers/ClawSharp.TestHelpers.csproj" />
2021
</Folder>
2122
</Solution>

src/ClawSharp.Providers/ClawSharp.Providers.csproj

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,10 @@
44
<ProjectReference Include="..\ClawSharp.Core\ClawSharp.Core.csproj" />
55
</ItemGroup>
66

7+
<ItemGroup>
8+
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="10.0.3" />
9+
</ItemGroup>
10+
711
<PropertyGroup>
812
<ImplicitUsings>enable</ImplicitUsings>
913
<Nullable>enable</Nullable>
Lines changed: 202 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,202 @@
using System.Net.Http.Json;
using System.Runtime.CompilerServices;
using System.Text.Json;
using ClawSharp.Core.Providers;
using Microsoft.Extensions.Logging;

namespace ClawSharp.Providers;

/// <summary>
/// OpenAI API provider implementation of <see cref="ILlmProvider"/>.
/// The injected <see cref="HttpClient"/> is expected to be pre-configured with the
/// API base address (e.g. <c>https://api.openai.com/v1/</c>) and authorization
/// header — this class issues only relative URIs ("models", "chat/completions").
/// </summary>
public class OpenAiProvider : ILlmProvider
{
    private readonly HttpClient _http;
    private readonly ILogger<OpenAiProvider> _logger;

    /// <summary>Stable provider identifier.</summary>
    public string Name => "openai";

    /// <summary>
    /// Creates the provider.
    /// </summary>
    /// <param name="httpClient">Configured HTTP client (base address + auth).</param>
    /// <param name="logger">Logger for diagnostic output.</param>
    /// <exception cref="ArgumentNullException">Either argument is null.</exception>
    public OpenAiProvider(HttpClient httpClient, ILogger<OpenAiProvider> logger)
    {
        _http = httpClient ?? throw new ArgumentNullException(nameof(httpClient));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    }

    /// <summary>
    /// Probes the API by issuing <c>GET models</c>. Returns false on any HTTP or
    /// transport failure instead of throwing.
    /// </summary>
    public async Task<bool> IsAvailableAsync(CancellationToken ct = default)
    {
        try
        {
            var response = await _http.GetAsync("models", ct).ConfigureAwait(false);
            return response.IsSuccessStatusCode;
        }
        // FIX: was `catch (Exception)`, which also swallowed OperationCanceledException
        // and reported a caller-requested cancellation as "API unavailable".
        // Cancellation must propagate; every other failure means "not available".
        catch (Exception ex) when (ex is not OperationCanceledException)
        {
            _logger.LogDebug(ex, "OpenAI API not available");
            return false;
        }
    }

    /// <summary>
    /// Lists model ids from <c>GET models</c>, sorted ascending.
    /// </summary>
    /// <exception cref="HttpRequestException">The API returned a non-success status.</exception>
    public async Task<IReadOnlyList<string>> ListModelsAsync(CancellationToken ct = default)
    {
        var response = await _http.GetAsync("models", ct).ConfigureAwait(false);
        response.EnsureSuccessStatusCode();

        var json = await response.Content.ReadFromJsonAsync<JsonElement>(ct).ConfigureAwait(false);
        var data = json.GetProperty("data");

        var models = new List<string>();
        foreach (var item in data.EnumerateArray())
        {
            // Skip malformed entries that lack an "id" field.
            if (item.TryGetProperty("id", out var id))
            {
                models.Add(id.GetString()!);
            }
        }

        models.Sort();
        return models;
    }

    /// <summary>
    /// Performs a non-streaming chat completion (<c>POST chat/completions</c>).
    /// </summary>
    /// <exception cref="HttpRequestException">The API returned a non-success status.</exception>
    public async Task<LlmResponse> CompleteAsync(LlmRequest request, CancellationToken ct = default)
    {
        var body = BuildRequestBody(request, stream: false);
        var response = await _http.PostAsJsonAsync("chat/completions", body, ct).ConfigureAwait(false);
        response.EnsureSuccessStatusCode();

        var json = await response.Content.ReadFromJsonAsync<JsonElement>(ct).ConfigureAwait(false);
        return ParseResponse(json);
    }

    /// <summary>
    /// Performs a streaming chat completion, yielding one chunk per server-sent
    /// event (<c>data:</c> lines) until the <c>[DONE]</c> sentinel.
    /// NOTE(review): only content/finish_reason deltas are surfaced; streamed
    /// tool-call deltas are currently dropped by <see cref="ParseStreamChunk"/>.
    /// </summary>
    public async IAsyncEnumerable<LlmStreamChunk> StreamAsync(
        LlmRequest request,
        [EnumeratorCancellation] CancellationToken ct = default)
    {
        var body = BuildRequestBody(request, stream: true);
        var httpRequest = new HttpRequestMessage(HttpMethod.Post, "chat/completions")
        {
            Content = JsonContent.Create(body)
        };

        // ResponseHeadersRead lets us start consuming the SSE stream before the
        // full body has arrived.
        using var response = await _http.SendAsync(httpRequest, HttpCompletionOption.ResponseHeadersRead, ct).ConfigureAwait(false);
        response.EnsureSuccessStatusCode();

        await using var stream = await response.Content.ReadAsStreamAsync(ct).ConfigureAwait(false);
        using var reader = new StreamReader(stream);

        string? line;
        while ((line = await reader.ReadLineAsync(ct).ConfigureAwait(false)) != null)
        {
            // SSE payload lines are prefixed "data: "; everything else (blank
            // keep-alives, comments) is ignored. Ordinal: protocol token, not text.
            if (string.IsNullOrWhiteSpace(line) || !line.StartsWith("data: ", StringComparison.Ordinal)) continue;

            var data = line.Substring(6).Trim();
            if (data == "[DONE]") break;

            var json = JsonSerializer.Deserialize<JsonElement>(data);
            var chunk = ParseStreamChunk(json);
            if (chunk != null) yield return chunk;
        }
    }

    /// <summary>
    /// Builds the chat/completions request payload, including messages,
    /// optional tool definitions, and sampling parameters.
    /// </summary>
    private object BuildRequestBody(LlmRequest request, bool stream)
    {
        var body = new Dictionary<string, object>
        {
            ["model"] = request.Model,
            ["messages"] = request.Messages.Select(m => new
            {
                role = m.Role,
                content = m.Content,
                // Assistant messages may echo back previous tool calls.
                tool_calls = m.ToolCalls?.Select(tc => new
                {
                    id = tc.Id,
                    type = "function",
                    function = new
                    {
                        name = tc.Name,
                        arguments = tc.ArgumentsJson
                    }
                }).ToArray(),
                // Set on "tool" role messages that carry a tool result.
                tool_call_id = m.ToolCallId,
                name = m.Name
            }).ToArray(),
            ["temperature"] = request.Temperature,
            ["stream"] = stream
        };

        if (request.MaxTokens.HasValue)
            body["max_tokens"] = request.MaxTokens.Value;

        if (request.Tools != null && request.Tools.Count > 0)
        {
            body["tools"] = request.Tools.Select(t => new
            {
                type = "function",
                function = new
                {
                    name = t.Name,
                    description = t.Description,
                    parameters = t.ParametersSchema
                }
            }).ToArray();
        }

        return body;
    }

    /// <summary>
    /// Maps a non-streaming completions response (first choice only) to an
    /// <see cref="LlmResponse"/>. Missing/null content becomes "".
    /// </summary>
    private LlmResponse ParseResponse(JsonElement json)
    {
        var choice = json.GetProperty("choices")[0];
        var message = choice.GetProperty("message");

        // "content" is JSON null when the model responds purely with tool calls.
        var content = message.TryGetProperty("content", out var contentProp)
            ? contentProp.GetString() ?? ""
            : "";

        var toolCalls = new List<ToolCallRequest>();
        if (message.TryGetProperty("tool_calls", out var toolCallsArray))
        {
            foreach (var tc in toolCallsArray.EnumerateArray())
            {
                var id = tc.GetProperty("id").GetString()!;
                var function = tc.GetProperty("function");
                var name = function.GetProperty("name").GetString()!;
                // Arguments arrive as a JSON-encoded string, not a nested object.
                var arguments = function.GetProperty("arguments").GetString()!;

                toolCalls.Add(new ToolCallRequest(id, name, arguments));
            }
        }

        var finishReason = choice.GetProperty("finish_reason").GetString()!;

        UsageInfo? usage = null;
        if (json.TryGetProperty("usage", out var usageProp))
        {
            usage = new UsageInfo(
                usageProp.GetProperty("prompt_tokens").GetInt32(),
                usageProp.GetProperty("completion_tokens").GetInt32(),
                usageProp.GetProperty("total_tokens").GetInt32()
            );
        }

        return new LlmResponse(content, toolCalls, finishReason, usage);
    }

    /// <summary>
    /// Maps one streamed SSE event to a chunk, or null when the event carries
    /// no choices. Only content deltas and finish_reason are extracted.
    /// </summary>
    private LlmStreamChunk? ParseStreamChunk(JsonElement json)
    {
        if (!json.TryGetProperty("choices", out var choices) || choices.GetArrayLength() == 0)
            return null;

        var choice = choices[0];
        var delta = choice.GetProperty("delta");

        var contentDelta = delta.TryGetProperty("content", out var content)
            ? content.GetString()
            : null;

        // finish_reason is JSON null on every chunk except the last.
        var finishReason = choice.TryGetProperty("finish_reason", out var fr) && fr.ValueKind != JsonValueKind.Null
            ? fr.GetString()
            : null;

        return new LlmStreamChunk(contentDelta, null, finishReason, null);
    }
}
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
<Project Sdk="Microsoft.NET.Sdk">
2+
3+
<PropertyGroup>
4+
<TargetFramework>net10.0</TargetFramework>
5+
<ImplicitUsings>enable</ImplicitUsings>
6+
<Nullable>enable</Nullable>
7+
<IsPackable>false</IsPackable>
8+
</PropertyGroup>
9+
10+
<ItemGroup>
11+
<PackageReference Include="coverlet.collector" Version="6.0.4" />
12+
<PackageReference Include="FluentAssertions" Version="8.8.0" />
13+
<PackageReference Include="Microsoft.Extensions.Logging" Version="10.0.3" />
14+
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
15+
<PackageReference Include="NSubstitute" Version="5.3.0" />
16+
<PackageReference Include="xunit" Version="2.9.3" />
17+
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
18+
</ItemGroup>
19+
20+
<ItemGroup>
21+
<Using Include="Xunit" />
22+
</ItemGroup>
23+
24+
<ItemGroup>
25+
<ProjectReference Include="..\..\src\ClawSharp.Providers\ClawSharp.Providers.csproj" />
26+
<ProjectReference Include="..\..\src\ClawSharp.Core\ClawSharp.Core.csproj" />
27+
<ProjectReference Include="..\ClawSharp.TestHelpers\ClawSharp.TestHelpers.csproj" />
28+
</ItemGroup>
29+
30+
</Project>
Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
using ClawSharp.Core.Providers;
using ClawSharp.Providers;
using ClawSharp.TestHelpers;
using FluentAssertions;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;

namespace ClawSharp.Providers.Tests;

/// <summary>
/// Unit tests for <see cref="OpenAiProvider"/>. All HTTP traffic is routed
/// through a mock handler so no real network calls are made.
/// </summary>
public class OpenAiProviderTests
{
    private readonly MockHttpMessageHandler _messageHandler = new();
    private readonly OpenAiProvider _sut;

    public OpenAiProviderTests()
    {
        // Build the system under test against the mocked transport.
        var httpClient = _messageHandler.CreateClient("https://api.openai.com/v1/");
        _sut = new OpenAiProvider(httpClient, NullLogger<OpenAiProvider>.Instance);
    }

    [Fact]
    public async Task CompleteAsync_ReturnsContent()
    {
        // Arrange: canned completion with usage figures.
        _messageHandler.EnqueueJson("""
        { "choices": [{ "message": { "content": "Hello!" }, "finish_reason": "stop" }],
        "usage": { "prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15 } }
        """);
        var req = new LlmRequest { Model = "gpt-4o", Messages = [new("user", "Hi")] };

        // Act
        var result = await _sut.CompleteAsync(req);

        // Assert
        result.Content.Should().Be("Hello!");
        result.FinishReason.Should().Be("stop");
        result.Usage!.TotalTokens.Should().Be(15);
    }

    [Fact]
    public async Task CompleteAsync_WithToolCalls_ParsesCorrectly()
    {
        // Arrange: model answers with a function call instead of text.
        _messageHandler.EnqueueJson("""
        { "choices": [{ "message": { "content": "",
        "tool_calls": [{ "id": "call_123",
        "function": { "name": "shell", "arguments": "{\"command\":\"date\"}" } }] },
        "finish_reason": "tool_calls" }] }
        """);
        var req = new LlmRequest { Model = "gpt-4o", Messages = [new("user", "What time?")] };

        // Act
        var result = await _sut.CompleteAsync(req);

        // Assert
        result.ToolCalls.Should().HaveCount(1);
        result.ToolCalls[0].Name.Should().Be("shell");
        result.FinishReason.Should().Be("tool_calls");
    }

    [Fact]
    public async Task IsAvailableAsync_WhenApiReachable_ReturnsTrue()
    {
        _messageHandler.EnqueueJson("""{ "data": [] }""");

        var available = await _sut.IsAvailableAsync();

        available.Should().BeTrue();
    }

    [Fact]
    public async Task IsAvailableAsync_WhenApiUnreachable_ReturnsFalse()
    {
        _messageHandler.EnqueueError(System.Net.HttpStatusCode.ServiceUnavailable);

        var available = await _sut.IsAvailableAsync();

        available.Should().BeFalse();
    }

    [Fact]
    public async Task ListModelsAsync_ReturnsModelIds()
    {
        _messageHandler.EnqueueJson("""{ "data": [{"id": "gpt-4o"}, {"id": "gpt-3.5-turbo"}] }""");

        var ids = await _sut.ListModelsAsync();

        ids.Should().Contain("gpt-4o");
        ids.Should().BeInAscendingOrder();
    }

    [Fact]
    public void Name_ReturnsOpenai() => _sut.Name.Should().Be("openai");
}

0 commit comments

Comments
 (0)