@@ -1,3 +1,4 @@
+import { join } from 'node:path';
 import { afterAll, describe, expect, test } from 'vitest';
 import { cleanupChildProcesses, createRunner } from '../../../utils/runner';

@@ -7,125 +8,133 @@ describe('ai', () => {
    cleanupChildProcesses();
  });

-  test('creates ai related spans', async () => {
-    const EXPECTED_TRANSACTION = {
-      transaction: 'main',
-      spans: expect.arrayContaining([
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'ai.completion_tokens.used': 20,
-            'ai.model.id': 'mock-model-id',
-            'ai.model.provider': 'mock-provider',
-            'ai.model_id': 'mock-model-id',
-            'ai.operationId': 'ai.generateText',
-            'ai.pipeline.name': 'generateText',
-            'ai.prompt_tokens.used': 10,
-            'ai.response.finishReason': 'stop',
-            'ai.settings.maxRetries': 2,
-            'ai.settings.maxSteps': 1,
-            'ai.streaming': false,
-            'ai.total_tokens.used': 30,
-            'ai.usage.completionTokens': 20,
-            'ai.usage.promptTokens': 10,
-            'operation.name': 'ai.generateText',
-            'sentry.op': 'ai.pipeline.generateText',
-            'sentry.origin': 'auto.vercelai.otel',
-          }),
-          description: 'generateText',
-          op: 'ai.pipeline.generateText',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+  const EXPECTED_TRANSACTION = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'ai.completion_tokens.used': 20,
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.model_id': 'mock-model-id',
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
+          'ai.prompt_tokens.used': 10,
+          'ai.response.finishReason': 'stop',
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
+          'ai.total_tokens.used': 30,
+          'ai.usage.completionTokens': 20,
+          'ai.usage.promptTokens': 10,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.origin': 'auto.vercelai.otel',
        }),
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'sentry.origin': 'auto.vercelai.otel',
-            'sentry.op': 'ai.run.doGenerate',
-            'operation.name': 'ai.generateText.doGenerate',
-            'ai.operationId': 'ai.generateText.doGenerate',
-            'ai.model.provider': 'mock-provider',
-            'ai.model.id': 'mock-model-id',
-            'ai.settings.maxRetries': 2,
-            'gen_ai.system': 'mock-provider',
-            'gen_ai.request.model': 'mock-model-id',
-            'ai.pipeline.name': 'generateText.doGenerate',
-            'ai.model_id': 'mock-model-id',
-            'ai.streaming': false,
-            'ai.response.finishReason': 'stop',
-            'ai.response.model': 'mock-model-id',
-            'ai.usage.promptTokens': 10,
-            'ai.usage.completionTokens': 20,
-            'gen_ai.response.finish_reasons': ['stop'],
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 20,
-            'ai.completion_tokens.used': 20,
-            'ai.prompt_tokens.used': 10,
-            'ai.total_tokens.used': 30,
-          }),
-          description: 'generateText.doGenerate',
-          op: 'ai.run.doGenerate',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+        description: 'generateText',
+        op: 'ai.pipeline.generateText',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'sentry.origin': 'auto.vercelai.otel',
+          'sentry.op': 'ai.run.doGenerate',
+          'operation.name': 'ai.generateText.doGenerate',
+          'ai.operationId': 'ai.generateText.doGenerate',
+          'ai.model.provider': 'mock-provider',
+          'ai.model.id': 'mock-model-id',
+          'ai.settings.maxRetries': 2,
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.request.model': 'mock-model-id',
+          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.model_id': 'mock-model-id',
+          'ai.streaming': false,
+          'ai.response.finishReason': 'stop',
+          'ai.response.model': 'mock-model-id',
+          'ai.usage.promptTokens': 10,
+          'ai.usage.completionTokens': 20,
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'ai.completion_tokens.used': 20,
+          'ai.prompt_tokens.used': 10,
+          'ai.total_tokens.used': 30,
        }),
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'ai.completion_tokens.used': 20,
-            'ai.model.id': 'mock-model-id',
-            'ai.model.provider': 'mock-provider',
-            'ai.model_id': 'mock-model-id',
-            'ai.prompt': '{"prompt":"Where is the second span?"}',
-            'ai.operationId': 'ai.generateText',
-            'ai.pipeline.name': 'generateText',
-            'ai.prompt_tokens.used': 10,
-            'ai.response.finishReason': 'stop',
-            'ai.input_messages': '{"prompt":"Where is the second span?"}',
-            'ai.settings.maxRetries': 2,
-            'ai.settings.maxSteps': 1,
-            'ai.streaming': false,
-            'ai.total_tokens.used': 30,
-            'ai.usage.completionTokens': 20,
-            'ai.usage.promptTokens': 10,
-            'operation.name': 'ai.generateText',
-            'sentry.op': 'ai.pipeline.generateText',
-            'sentry.origin': 'auto.vercelai.otel',
-          }),
-          description: 'generateText',
-          op: 'ai.pipeline.generateText',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+        description: 'generateText.doGenerate',
+        op: 'ai.run.doGenerate',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'ai.completion_tokens.used': 20,
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.model_id': 'mock-model-id',
+          'ai.prompt': '{"prompt":"Where is the second span?"}',
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
+          'ai.prompt_tokens.used': 10,
+          'ai.response.finishReason': 'stop',
+          'ai.input_messages': '{"prompt":"Where is the second span?"}',
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
+          'ai.total_tokens.used': 30,
+          'ai.usage.completionTokens': 20,
+          'ai.usage.promptTokens': 10,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.origin': 'auto.vercelai.otel',
        }),
-        expect.objectContaining({
-          data: expect.objectContaining({
-            'sentry.origin': 'auto.vercelai.otel',
-            'sentry.op': 'ai.run.doGenerate',
-            'operation.name': 'ai.generateText.doGenerate',
-            'ai.operationId': 'ai.generateText.doGenerate',
-            'ai.model.provider': 'mock-provider',
-            'ai.model.id': 'mock-model-id',
-            'ai.settings.maxRetries': 2,
-            'gen_ai.system': 'mock-provider',
-            'gen_ai.request.model': 'mock-model-id',
-            'ai.pipeline.name': 'generateText.doGenerate',
-            'ai.model_id': 'mock-model-id',
-            'ai.streaming': false,
-            'ai.response.finishReason': 'stop',
-            'ai.response.model': 'mock-model-id',
-            'ai.usage.promptTokens': 10,
-            'ai.usage.completionTokens': 20,
-            'gen_ai.response.finish_reasons': ['stop'],
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 20,
-            'ai.completion_tokens.used': 20,
-            'ai.prompt_tokens.used': 10,
-            'ai.total_tokens.used': 30,
-          }),
-          description: 'generateText.doGenerate',
-          op: 'ai.run.doGenerate',
-          origin: 'auto.vercelai.otel',
-          status: 'ok',
+        description: 'generateText',
+        op: 'ai.pipeline.generateText',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'sentry.origin': 'auto.vercelai.otel',
+          'sentry.op': 'ai.run.doGenerate',
+          'operation.name': 'ai.generateText.doGenerate',
+          'ai.operationId': 'ai.generateText.doGenerate',
+          'ai.model.provider': 'mock-provider',
+          'ai.model.id': 'mock-model-id',
+          'ai.settings.maxRetries': 2,
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.request.model': 'mock-model-id',
+          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.model_id': 'mock-model-id',
+          'ai.streaming': false,
+          'ai.response.finishReason': 'stop',
+          'ai.response.model': 'mock-model-id',
+          'ai.usage.promptTokens': 10,
+          'ai.usage.completionTokens': 20,
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'ai.completion_tokens.used': 20,
+          'ai.prompt_tokens.used': 10,
+          'ai.total_tokens.used': 30,
        }),
-      ]),
-    };
+        description: 'generateText.doGenerate',
+        op: 'ai.run.doGenerate',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+    ]),
+  };

+  test('creates ai related spans - cjs', async () => {
    await createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start().completed();
  });
+
+  test('creates ai related spans - esm', async () => {
+    await createRunner(__dirname, 'scenario.mjs')
+      .withFlags('--import', join(__dirname, 'instrument.mjs'))
+      .expect({ transaction: EXPECTED_TRANSACTION })
+      .start()
+      .completed();
+  });
});
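
For context on where the asserted values come from: the scenario files driven by `createRunner` are not part of this diff. The expected token counts (10 prompt / 20 completion / 30 total), the `mock-provider`/`mock-model-id` attributes, and the recorded prompt all point at the Vercel AI SDK being exercised against a mock model inside a `main` span. Below is a hypothetical sketch of what such a `scenario.mjs` could look like, using the AI SDK's `MockLanguageModelV1` test helper; the repo's real scenario file may differ, and the `text` value and span layout here are assumptions chosen to line up with the assertions above.

```js
// Hypothetical scenario.mjs sketch — not the file from this PR.
import * as Sentry from '@sentry/node';
import { generateText } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';

const mockModel = () =>
  new MockLanguageModelV1({
    doGenerate: async () => ({
      rawCall: { rawPrompt: null, rawSettings: {} },
      finishReason: 'stop',
      usage: { promptTokens: 10, completionTokens: 20 }, // 10 + 20 = the asserted 30 total tokens
      text: 'First span here!',
    }),
  });

Sentry.startSpan({ name: 'main' }, async () => {
  // A plain call records no inputs; a second call opting into telemetry
  // recording would explain the span that asserts 'ai.prompt' above.
  await generateText({ model: mockModel(), prompt: 'Where is the second span?' });
  await generateText({
    model: mockModel(),
    prompt: 'Where is the second span?',
    experimental_telemetry: { isEnabled: true, recordInputs: true, recordOutputs: true },
  });
});
```

The ESM test presumably works by forwarding `--import` to the spawned Node process via `.withFlags(...)`, equivalent to `node --import ./instrument.mjs scenario.mjs`, so the `instrument.mjs` file (also not shown here) can call `Sentry.init` before the AI SDK loads.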