diff --git a/.devcontainer/README.md b/.devcontainer/README.md
index 396929a..c7d8d27 100644
--- a/.devcontainer/README.md
+++ b/.devcontainer/README.md
@@ -1,4 +1,9 @@
+
+

+
+
+
> **Remember to shutdown a GitHub Codespace when it is not in use!**
# Dev Containers Quick Start
diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock
index 8f932cd..7da3a9f 100644
--- a/.speakeasy/gen.lock
+++ b/.speakeasy/gen.lock
@@ -1,17 +1,19 @@
lockVersion: 2.0.0
id: c48cf606-fb42-4a45-9c23-8f0555307828
management:
- docChecksum: 59bf622329ff2d6d2a28acceada10a6d
+ docChecksum: 61b0dbad803186fe543a409f117e9993
docVersion: 1.0.0
speakeasyVersion: 1.680.0
generationVersion: 2.788.4
- releaseVersion: 0.9.1
- configChecksum: 9c8b936b4b1d37d964e844ee1a15cd4b
+ releaseVersion: 0.9.2
+ configChecksum: f906b25652bcc83a52d1c74e5018c02e
+ repoURL: https://github.com/OpenRouterTeam/python-sdk.git
+ installationURL: https://github.com/OpenRouterTeam/python-sdk.git
published: true
persistentEdits:
- generation_id: ce3858e4-7fbd-4f52-9d57-24016d5c1504
- pristine_commit_hash: c581f0a6ace9e1c793b0f4d759585b420c157610
- pristine_tree_hash: c5a3bf3d181c5c54d764cdf6372feb79505d5ed7
+ generation_id: af640944-4615-4a9d-a9bc-5831d663c096
+ pristine_commit_hash: 112ce9efef548df7b7402f481412b115d6adb2e3
+ pristine_tree_hash: cae77974ea359e29871863bc058e44b149e00e98
features:
python:
acceptHeaders: 3.0.0
@@ -50,8 +52,8 @@ features:
trackedFiles:
.devcontainer/README.md:
id: b170c0f184ac
- last_write_checksum: sha1:3c63dfd4dcea8df0d4add09e975a6887e6da4aab
- pristine_git_object: 396929ab2e57cf09dd25ec83bd4496f733b95701
+ last_write_checksum: sha1:919a25ed5129d316f05a60131c164011763b7c54
+ pristine_git_object: c7d8d278438e7906fedb39d99151372b3324ea66
.devcontainer/devcontainer.json:
id: b34062a34eb1
last_write_checksum: sha1:94309e8c8eab5ab063986bf625f513a59825e947
@@ -1070,16 +1072,16 @@ trackedFiles:
pristine_git_object: e38b5c19b6356f255783dfacea66b45418679e21
docs/components/imagegenerationservertoolconfig.md:
id: 4086d42eed1b
- last_write_checksum: sha1:9da8f5dbeb8e76419b5ca3041965105934b0ad9b
- pristine_git_object: b3fbb8ea61ef0f98681cc1507b2f192556822141
+ last_write_checksum: sha1:4be4af2bb484ca26d779f052da6a645dcb4d912c
+ pristine_git_object: d1bc3aceee221331b9f45bd00bbd8d0ea75a4a73
docs/components/imagegenerationservertoolconfigunion.md:
id: 7ba56d56f6bc
last_write_checksum: sha1:99eb92cf87c2e68e8db530d3ebdaaff6be117587
pristine_git_object: b9319d587cbf5118b4b12874b67a4a86ec4d1b16
docs/components/imagegenerationservertoolopenrouter.md:
id: c5bf0021441a
- last_write_checksum: sha1:25069e75ba4a6a6b98bc6c168ef09c2e2960f630
- pristine_git_object: edee38367aaf7511e2a5d3f386d493005ce3c544
+ last_write_checksum: sha1:01a0ffcf8d7d3c806c1135e73ed4926ca5d05dc9
+ pristine_git_object: dce650cbf584a1798754d1610898a9432b544d28
docs/components/imagegenerationservertoolopenroutertype.md:
id: 420c811d4561
last_write_checksum: sha1:6b8ce48b3e662ea3bfd8a9be737e43cd1b051e39
@@ -1550,8 +1552,8 @@ trackedFiles:
pristine_git_object: e88886f738fef9ad22e5efc74e3e0ea2cd8ceeb3
docs/components/openresponsesresult.md:
id: 9795b7de3f92
- last_write_checksum: sha1:5c1834da4b2297e6cdc0328c8d9c026303f6b9b3
- pristine_git_object: 7980a1a0edea51a2fa9cde48f5f9554029ef827b
+ last_write_checksum: sha1:616759398ca0662e4e9b6bf424727b8b019cc379
+ pristine_git_object: 1dd4efe3b7032b9dc3c86e4202bde0c8dcd97360
docs/components/openresponsesresultobject.md:
id: a8c7f7bc0b24
last_write_checksum: sha1:3df49df80dda49ea4cb53f913931fd4754769b66
@@ -1578,8 +1580,8 @@ trackedFiles:
pristine_git_object: 947a0b4decad7398ad4ddd0ff70a3a8cf95c5e87
docs/components/options.md:
id: d57f3108a48d
- last_write_checksum: sha1:33aba904de1c5bbbb772a34eaf56daa859a81143
- pristine_git_object: e695cb694be73ef388c6039ef8803a6ce146e500
+ last_write_checksum: sha1:1b035f938558bfad488929b804e3a863f3cf0c9e
+ pristine_git_object: e14bae1858d5ed546b84b553a9e87bcfbfae22c3
docs/components/order.md:
id: 16e6ae6962e7
last_write_checksum: sha1:a060705e562a2f3fc024c4fd8aa80604535b0bc1
@@ -1702,8 +1704,8 @@ trackedFiles:
pristine_git_object: d7d592b96122136f2e4becd5edc72b95062d6237
docs/components/outputimagegenerationservertoolitem.md:
id: 0a39a1c10949
- last_write_checksum: sha1:b7ffc78b0acdbd0c9e5c9b6a777e8e2e39ff16ee
- pristine_git_object: 563b9e780a842074f1ea9da0a9b81133833609fc
+ last_write_checksum: sha1:794cbf0f99dc7ea2739d94d9ab96e2f3f07bb323
+ pristine_git_object: ec9409ff0690e6787b0f464c83aa9cf5753fb4f6
docs/components/outputimagegenerationservertoolitemtype.md:
id: e8483d8856aa
last_write_checksum: sha1:43f034d1b716ef2878be034bb01e836395c7b686
@@ -1722,8 +1724,8 @@ trackedFiles:
pristine_git_object: d9e663fcae49dba7885223f4f0848aa9f9a23002
docs/components/outputitems.md:
id: 92565743132e
- last_write_checksum: sha1:0080f5650ea184795c28fc44983eec2989b58634
- pristine_git_object: aaa3a85a2a72fba77cc2a3e939d8df3a00e1073e
+ last_write_checksum: sha1:58d753ef954e4e8d45c7d74a14257fac307eaf3d
+ pristine_git_object: 146c12f4dd9b8583c2a7874d80362b9182b1bb1b
docs/components/outputlogs.md:
id: 0821eefe9546
last_write_checksum: sha1:1ff4592093bd2fe28038cc8db28bee6a35025c96
@@ -1864,6 +1866,14 @@ trackedFiles:
id: 2efd271441f7
last_write_checksum: sha1:36a63bf61d927a595f3e58dd0b607a8937db9ff3
pristine_git_object: 24800454b81c6b2f9a0180f283d793216bd90384
+ docs/components/outputsearchmodelsservertoolitem.md:
+ id: 17da96b2505f
+ last_write_checksum: sha1:da40a4743d0ae6385588944cac094ce9bd4d14c9
+ pristine_git_object: a5e1fb16896dc6841105b7f01e735539b8876a5b
+ docs/components/outputsearchmodelsservertoolitemtype.md:
+ id: 37be34b271c8
+ last_write_checksum: sha1:237762e35b5cf89509bc3bf8b261f174016d3903
+ pristine_git_object: dc9df5ca2122a044efa3d2ab1d13b2f957f67d34
docs/components/outputtexteditorservertoolitem.md:
id: 6c114519044a
last_write_checksum: sha1:7b90b5ce30ff6df03376e337e266dbdadfa751c0
@@ -2006,8 +2016,8 @@ trackedFiles:
pristine_git_object: 56d2fd0642e93c72502597f761e75cf50ef6a597
docs/components/providername.md:
id: fe24a77d9911
- last_write_checksum: sha1:47af6679dbedcf8d8d0ac8995cfc9cd3020a6b7b
- pristine_git_object: 22effdd1f55bc9d60648a80e825e17d4b94b01c1
+ last_write_checksum: sha1:f824508d7058025fba04264c5086e374c23e0a76
+ pristine_git_object: 1136a77ac827853575c6ae83a4efdf0bd097fec0
docs/components/provideroverloadedresponseerrordata.md:
id: c2a8cc4ec6df
last_write_checksum: sha1:1d206d00dc0d91b5ceec470579c6ce345cc23d29
@@ -2022,8 +2032,8 @@ trackedFiles:
pristine_git_object: 9dca4ee9d4a9ec0a46ce21fb3823f05e8e119334
docs/components/providerresponseprovidername.md:
id: 800bab487911
- last_write_checksum: sha1:befe2d2077d8abc969729b09ce4aba6a2b14bb37
- pristine_git_object: 73838c84a7682a4b6903b0ab3383b6514ca6951e
+ last_write_checksum: sha1:2674cebb5d1bb256776d982001c05305c0a0e419
+ pristine_git_object: 49b34491b8fc5c7b70462dcdd8d16fe0b0d3c002
docs/components/providersort.md:
id: 290449b1880f
last_write_checksum: sha1:f15d65e5c226a222c3bfc0b9f55dd9ca0443d415
@@ -2412,14 +2422,6 @@ trackedFiles:
id: 0cf5ab254bc8
last_write_checksum: sha1:632fc2775ec4066a2b1ee403579ff59bdddb4a91
pristine_git_object: 4b82eb64344981f49f6a8fa220eb817427f4de78
- docs/components/textconfig.md:
- id: 11301a29023b
- last_write_checksum: sha1:62e407146de6b41467b9ec8c630ad8ad8a3f1743
- pristine_git_object: cb4ddd8f20f0302ac0550682d8e75f162c78c0b3
- docs/components/textconfigverbosity.md:
- id: d49483ecf413
- last_write_checksum: sha1:a51ce28b6733aa1b579c3f9dacee605e08086497
- pristine_git_object: 0bdb609ebbb664477effde646423a619b74738b9
docs/components/textdeltaevent.md:
id: 339fe3591c0d
last_write_checksum: sha1:97aed7ba0e23221806738f4ed7819f8aec0db957
@@ -2438,12 +2440,8 @@ trackedFiles:
pristine_git_object: 5c73058b054429983632f0542af4fc0fb50300c5
docs/components/textextendedconfig.md:
id: 669e1815f825
- last_write_checksum: sha1:a64da07e11d1437c3c7453d7a6c8d3f2757a8af6
- pristine_git_object: cdda558b74a056f33dbc0f2cf63a02265087d7db
- docs/components/textextendedconfigverbosity.md:
- id: 355071ba1bcf
- last_write_checksum: sha1:9457b6011994833a86a2223b544d9d7d1b7189b6
- pristine_git_object: bbb9988342a55552bdd3c0985cd9f6aa9a302bdd
+ last_write_checksum: sha1:7751fbe499ffee299268428faa3a3c63a5541796
+ pristine_git_object: b876df7514e8427c7eb7bacd42461924d5b0796f
docs/components/tokenizer.md:
id: f1ad3417d8d1
last_write_checksum: sha1:4223f630f7d880b9288fb654f67817560fb2dbd8
@@ -2560,6 +2558,10 @@ trackedFiles:
id: ad5d1b284346
last_write_checksum: sha1:30ebb41e8bac9a8d817b2c0ab0a9e9fded4a73ee
pristine_git_object: 0698d06289a06a6cfcd35f7069f2ef64a0aa0e25
+ docs/components/verbosity.md:
+ id: 0a9da5ac382d
+ last_write_checksum: sha1:76ec453c1e767ce0f8c76afc0617cdc7ff29b60c
+ pristine_git_object: cf10c9e56352e1ef074e794b915639774a0de872
docs/components/videogenerationrequest.md:
id: cacead3ac9fe
last_write_checksum: sha1:e9f69f5b244d468fbf3ecdd1872671fca4b4fc9b
@@ -3018,8 +3020,8 @@ trackedFiles:
pristine_git_object: 2855d5b95d41a494bc7ffa9f85c74ff27a46ca96
docs/operations/getgenerationdata.md:
id: 89a3aab8da20
- last_write_checksum: sha1:a535e1604ef9524eb8fb752711abd36dbe7219c6
- pristine_git_object: 7be5766a6c6a5f065834bdbbec761795c255689f
+ last_write_checksum: sha1:a9390399582e009205faf25c8b1de90b1dad0db7
+ pristine_git_object: 183399a0b5f704e790a1d2777cd45ec886154a5e
docs/operations/getgenerationglobals.md:
id: 0d6e856c5861
last_write_checksum: sha1:b5236815132dedf2b595fd520f1753d5ec8bb6e0
@@ -3422,12 +3424,16 @@ trackedFiles:
pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544
pyproject.toml:
id: 5d07e7d72637
- last_write_checksum: sha1:fe68c7165977ede3cf438be1344f72e54cf67919
- pristine_git_object: c678027236e18070a0081d37fb78bfd44392a8c0
+ last_write_checksum: sha1:62d35f2ecbe840540100da937920ee63980a1e12
+ pristine_git_object: 9e4606d0c68e092819b9d86eb23859bd85c5ede7
+ scripts/prepare_readme.py:
+ id: e0c5957a6035
+ last_write_checksum: sha1:77f44b60b98bc126557ec27391f91dfba764bb54
+ pristine_git_object: 03e9f9b59063c73d6f24995a77cdb971f3318d30
scripts/publish.sh:
id: fe273b08f514
- last_write_checksum: sha1:adc9b741c12ad1591ab4870eabe20f0d0a86cd1a
- pristine_git_object: ef28dc10c60d7d6a4bac0c6a1e9caba36b471861
+ last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3
+ pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48
src/openrouter/__init__.py:
id: 4514650f64eb
last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088
@@ -3446,8 +3452,8 @@ trackedFiles:
pristine_git_object: 07ace03229fd3267623c8f48665d2c3a67c3565d
src/openrouter/_version.py:
id: d8d15ad6c586
- last_write_checksum: sha1:8283c360656da42036d190270a7ad47551bc4317
- pristine_git_object: 4d43d337f71492564f02b651e5bfc78d27932ac5
+ last_write_checksum: sha1:dd226a1c87253767a3a41d735f600db8980faa45
+ pristine_git_object: fcebde34f88b9450f406b1563d00942dbd6d7b80
src/openrouter/analytics.py:
id: cb406b5aaabb
last_write_checksum: sha1:6d47d1ab3da9cb4ac9f0a2bfe442f1a82a614f35
@@ -3470,8 +3476,8 @@ trackedFiles:
pristine_git_object: bfa078ebbbff63f2c80860abf718fc57abcac2bd
src/openrouter/components/__init__.py:
id: 81754e97b3f4
- last_write_checksum: sha1:fba957e705a1e6222a64cdd71111390c6e14826e
- pristine_git_object: e41de295792aaaea529e94d6c70c3ee111266202
+ last_write_checksum: sha1:24ac44b6fc1dc2ed3b7f73a33c8c76b0ee924642
+ pristine_git_object: cce159ecb2d000232c002caceef3ad73ab5b6073
src/openrouter/components/activityitem.py:
id: 3bbe7d35f459
last_write_checksum: sha1:32b873ba5c0a9dbf5f0cf32c0217ce17395ae69e
@@ -3894,8 +3900,8 @@ trackedFiles:
pristine_git_object: c9609dbda39bbc43666c9625cf58baf469a6160c
src/openrouter/components/imagegenerationservertoolconfig.py:
id: 76f45207c1be
- last_write_checksum: sha1:8d092f865ab995390b0370627657a8689c6a36aa
- pristine_git_object: 7b7010abb24612645e134c7052a9475ca327b2d5
+ last_write_checksum: sha1:065e1ab5d5e29f01bc86428ecb438ad9f7f9dc7b
+ pristine_git_object: 0fdc8f67a8af50e789e2ae73d647d9ba43b703b2
src/openrouter/components/imagegenerationservertoolconfig_union.py:
id: d1c630a1476a
last_write_checksum: sha1:50d45b5a787e8cc9024c05b31e4113993ffe4038
@@ -4058,8 +4064,8 @@ trackedFiles:
pristine_git_object: 08d990d8cc07b7928158fc55784cd9d613f4c58b
src/openrouter/components/openresponsesresult.py:
id: 78f7e33b19e6
- last_write_checksum: sha1:8dd339b5021699b49dbfcff86e388947f61d5747
- pristine_git_object: fa04955d5675d8aef3ff3deaeb6560c7a0070f4f
+ last_write_checksum: sha1:4f234023126aad91df715d99450768fc2ed3252b
+ pristine_git_object: ea63258a915e1cd5663aa9f352f83af40f50e4a2
src/openrouter/components/openrouterwebsearchservertool.py:
id: f5400a5de3cd
last_write_checksum: sha1:e1cd10158e15f88d5c2903b1508a42d4006f4f3f
@@ -4110,16 +4116,16 @@ trackedFiles:
pristine_git_object: abaaa53610171f25ed9d814f355f9a600101b254
src/openrouter/components/outputimagegenerationservertoolitem.py:
id: cb34639447d7
- last_write_checksum: sha1:2383f1a2904297a4289e0523778bb928f5614d1f
- pristine_git_object: e91756b3e8d3e46283706192f9e44d03770d0fb3
+ last_write_checksum: sha1:21f58c84bea8a3ace8b91bba914e54cf62dad70e
+ pristine_git_object: a94bd0964eb714782858820e0db39afc28a0dcc3
src/openrouter/components/outputitemimagegenerationcall.py:
id: 0b4cbdbd733f
last_write_checksum: sha1:f9ea61a170b8e516108d2d59b0e7df9d264d3aad
pristine_git_object: 897c4727eec2497fe15e5603187e24218c08ac87
src/openrouter/components/outputitems.py:
id: 90f107de3116
- last_write_checksum: sha1:3e25cbb638d9f753193e6c7882077468032a30f8
- pristine_git_object: 4457f805c46470e36ff5cfd30eebab43e0039a96
+ last_write_checksum: sha1:0f241fcf9e8eb3dd8b315c6efee2d4017d0203fb
+ pristine_git_object: 75ffbc584663c12c8e7629145a76ca8d60b7de75
src/openrouter/components/outputmcpservertoolitem.py:
id: bb8c2536a58e
last_write_checksum: sha1:6cbe556b2268af0142d982007b273cc43b7605c7
@@ -4148,6 +4154,10 @@ trackedFiles:
id: 0bc0568f44f9
last_write_checksum: sha1:3c460a262ff94910ea443fa6ac7d3f6608c3209f
pristine_git_object: 50eff94f87fe82aba4fe330ee61e79ab885847fc
+ src/openrouter/components/outputsearchmodelsservertoolitem.py:
+ id: ba87dc220c62
+ last_write_checksum: sha1:f9682aae6fc8cafa19217af10d745180cacde1e2
+ pristine_git_object: f94949d2e5aedf4873295efcdf87f0f5367c1d32
src/openrouter/components/outputtexteditorservertoolitem.py:
id: 2a5dc9318fe6
last_write_checksum: sha1:75ce53e78b352b0427acfb5310b2500ee76351dd
@@ -4226,8 +4236,8 @@ trackedFiles:
pristine_git_object: 72dd15242d7d35b2b493f90d0cdebf6f8a51d684
src/openrouter/components/providername.py:
id: fcc722fa2fce
- last_write_checksum: sha1:5f1b8a8471ab3d11bd0d18927a9cd9c747bababe
- pristine_git_object: 7764a12512827640f3c32a22b199b09cd890b57f
+ last_write_checksum: sha1:be70b906ae9d172c5e5310728d3a4e83cff153ab
+ pristine_git_object: 109379a879ba8882755e0f069963062e0d792a06
src/openrouter/components/provideroverloadedresponseerrordata.py:
id: 5b693682570e
last_write_checksum: sha1:41a977452d58b52cb9d4b3a85f4b7970529352d5
@@ -4238,8 +4248,8 @@ trackedFiles:
pristine_git_object: 5145815e6196470f3f5f24893afdc4415d27c53e
src/openrouter/components/providerresponse.py:
id: ad3887be54c5
- last_write_checksum: sha1:d5988a5d8c6174971287275305c6b638b6325940
- pristine_git_object: 4856eee2be0d7aeaadc4cbacd0ba1b08e5886999
+ last_write_checksum: sha1:7c354d7847c0714711db5b75350ec406222f443d
+ pristine_git_object: 14f4f7ed497f6d78c51be562f3b6274c7496ef95
src/openrouter/components/providersort.py:
id: 348e382bf494
last_write_checksum: sha1:57551507f95cd2e16ef995e1c13f859fd0726152
@@ -4420,10 +4430,6 @@ trackedFiles:
id: e39bb8998edb
last_write_checksum: sha1:cd5570f2a08ea1c0315dfcbc0e4d704610adca41
pristine_git_object: 49f4ae2d09fa5d311465a0a77f7cede0f97f779a
- src/openrouter/components/textconfig.py:
- id: cbd40c12b745
- last_write_checksum: sha1:737f5adabb641d1d30152cee2f9a8db1e43d697b
- pristine_git_object: cf8fa534d38146d0734b6161264406d5d53b7955
src/openrouter/components/textdeltaevent.py:
id: 6d1d416a373d
last_write_checksum: sha1:9a024e07b3f2d65d5f88e01f0e2035efd956f3fc
@@ -4434,8 +4440,8 @@ trackedFiles:
pristine_git_object: 9bc3b1fd09ca7d4be981ceb7b10e14fb157dbb5f
src/openrouter/components/textextendedconfig.py:
id: 150a449e46de
- last_write_checksum: sha1:8e6dd8672fcc687c8a930c533d94daac4bb4ce5d
- pristine_git_object: ee561b18699c16f952bc036a2d0903e0ea3a8d9b
+ last_write_checksum: sha1:81f539fffffee92247de216b1ddc9269b02fcd54
+ pristine_git_object: 89224c7a342575e694ac269e1773c4c2a0ad5c07
src/openrouter/components/toolcallstatus.py:
id: 32339139c3f3
last_write_checksum: sha1:a75631cdb460f190aa10d5557519519d1ef5fbe3
@@ -4486,8 +4492,8 @@ trackedFiles:
pristine_git_object: 3ff415b693496c00c41261d09e7c0c12d83730fd
src/openrouter/components/videogenerationrequest.py:
id: 70e3c9ff288c
- last_write_checksum: sha1:f892d1c5506561312151604aa837b90546603d7f
- pristine_git_object: db54f129779d55863e0ea63affc0600700d2fcaf
+ last_write_checksum: sha1:5849b89902428311a0328c40e472c6f796119ffa
+ pristine_git_object: 4efa2a085160d7eac83b1677584dbc8b76c3d021
src/openrouter/components/videogenerationresponse.py:
id: 541f1321b072
last_write_checksum: sha1:051ace67776106616ec4c8e55cf2df9a99855d0f
@@ -4754,8 +4760,8 @@ trackedFiles:
pristine_git_object: 2d26173fd7e4555e188999eee1e282e85b68c73d
src/openrouter/operations/getgeneration.py:
id: d7c64961334f
- last_write_checksum: sha1:8816e63b89634cc5b5df7ef6119421224965e529
- pristine_git_object: 94dfcb738c716d409f17be7a2e11485531d90a34
+ last_write_checksum: sha1:443982f27645bf91a4ab70556bca42332045c526
+ pristine_git_object: 2433856e4c48cecba3ddca4d050fd48314c7e5f7
src/openrouter/operations/getguardrail.py:
id: 9b3309b59c64
last_write_checksum: sha1:f83b3d52731324d0bc87c382196b84713e77f6da
@@ -5098,7 +5104,7 @@ examples:
id: "gen-1234567890"
responses:
"200":
- application/json: {"data": {"api_type": "completions", "app_id": 12345, "cache_discount": 0.0002, "cancelled": false, "created_at": "2024-07-15T23:33:19.433273+00:00", "external_user": "user-123", "finish_reason": "stop", "generation_time": 1200, "http_referer": "", "id": "gen-3bhGkxlo4XFrqiabUM7NDtwDzWwG", "is_byok": false, "latency": 1250, "model": "sao10k/l3-stheno-8b", "moderation_latency": 50, "native_finish_reason": "stop", "native_tokens_cached": 3, "native_tokens_completion": 25, "native_tokens_completion_images": 0, "native_tokens_prompt": 10, "native_tokens_reasoning": 5, "num_input_audio_prompt": 0, "num_media_completion": 0, "num_media_prompt": 1, "num_search_results": 5, "origin": "https://openrouter.ai/", "provider_name": "Infermatic", "provider_responses": [], "router": "openrouter/auto", "streamed": true, "tokens_completion": 25, "tokens_prompt": 10, "total_cost": 0.0015, "upstream_id": "chatcmpl-791bcf62-080e-4568-87d0-94c72e3b4946", "upstream_inference_cost": 0.0012, "usage": 0.0015, "user_agent": ""}}
+ application/json: {"data": {"api_type": "completions", "app_id": 12345, "cache_discount": 0.0002, "cancelled": false, "created_at": "2024-07-15T23:33:19.433273+00:00", "external_user": "user-123", "finish_reason": "stop", "generation_time": 1200, "http_referer": "", "id": "gen-3bhGkxlo4XFrqiabUM7NDtwDzWwG", "is_byok": false, "latency": 1250, "model": "sao10k/l3-stheno-8b", "moderation_latency": 50, "native_finish_reason": "stop", "native_tokens_cached": 3, "native_tokens_completion": 25, "native_tokens_completion_images": 0, "native_tokens_prompt": 10, "native_tokens_reasoning": 5, "num_input_audio_prompt": 0, "num_media_completion": 0, "num_media_prompt": 1, "num_search_results": 5, "origin": "https://openrouter.ai/", "provider_name": "Infermatic", "provider_responses": [], "router": "openrouter/auto", "streamed": true, "tokens_completion": 25, "tokens_prompt": 10, "total_cost": 0.0015, "upstream_id": "chatcmpl-791bcf62-080e-4568-87d0-94c72e3b4946", "upstream_inference_cost": 0.0012, "usage": 0.0015, "user_agent": "", "web_search_engine": "exa"}}
"401":
application/json: {"error": {"code": 401, "message": "Missing Authentication header"}}
"402":
@@ -5574,3 +5580,4 @@ examples:
"500":
application/json: {"error": {"code": 500, "message": "Internal Server Error"}}
examplesVersion: 1.0.2
+releaseNotes: "## Python SDK Changes:\n* `open_router.beta.responses.send()`: `response` **Changed** **Breaking** :warning:\n* `open_router.generations.get_generation()`: `response.data.web_search_engine` **Added**\n* `open_router.video_generation.generate()`: \n * `request.provider.options.baidu` **Added**\n"
diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml
index 27a3b9f..a104a8b 100644
--- a/.speakeasy/gen.yaml
+++ b/.speakeasy/gen.yaml
@@ -32,7 +32,7 @@ generation:
skipResponseBodyAssertions: false
preApplyUnionDiscriminators: true
python:
- version: 0.9.1
+ version: 0.9.2
additionalDependencies:
dev: {}
main: {}
diff --git a/.speakeasy/out.openapi.yaml b/.speakeasy/out.openapi.yaml
index 6cbf6ba..152678e 100644
--- a/.speakeasy/out.openapi.yaml
+++ b/.speakeasy/out.openapi.yaml
@@ -1489,6 +1489,15 @@ components:
- thinking
- signature
type: object
+ AnthropicThinkingDisplay:
+ enum:
+ - summarized
+ - omitted
+ - null
+ example: summarized
+ nullable: true
+ type: string
+ x-speakeasy-unknown-values: allow
AnthropicThinkingTurns:
example:
type: thinking_turns
@@ -5965,12 +5974,12 @@ components:
Configuration for the openrouter:image_generation server tool. Accepts all image_config params (aspect_ratio, quality, size, background, output_format, output_compression, moderation, etc.) plus a model field.
example:
aspect_ratio: '16:9'
- model: openai/gpt-image-1
+ model: openai/gpt-5-image
quality: high
properties:
model:
- description: Which image generation model to use (e.g. "openai/gpt-image-1"). Defaults to "openai/gpt-image-1".
- example: openai/gpt-image-1
+ description: Which image generation model to use (e.g. "openai/gpt-5-image"). Defaults to "openai/gpt-5-image".
+ example: openai/gpt-5-image
type: string
type: object
ImageGenerationStatus:
@@ -7255,11 +7264,12 @@ components:
properties:
effort:
description: >-
- How much effort the model should put into its response. Higher effort levels may result in more thorough analysis but take longer. Valid values are `low`, `medium`, `high`, or `max`.
+ How much effort the model should put into its response. Higher effort levels may result in more thorough analysis but take longer. Valid values are `low`, `medium`, `high`, `xhigh`, or `max`.
enum:
- low
- medium
- high
+ - xhigh
- max
- null
example: medium
@@ -7283,6 +7293,29 @@ components:
- type
- schema
type: object
+ task_budget:
+ description: >-
+ Task budget for an agentic turn. The model sees a countdown of remaining tokens and uses it to prioritize work and wind down gracefully. Advisory — does not enforce a hard cap.
+ example:
+ total: 400000
+ type: tokens
+ nullable: true
+ properties:
+ remaining:
+ minimum: 0
+ nullable: true
+ type: integer
+ total:
+ minimum: 20000
+ type: integer
+ type:
+ enum:
+ - tokens
+ type: string
+ required:
+ - type
+ - total
+ type: object
type: object
MessagesPingEvent:
description: Keep-alive ping event
@@ -7475,6 +7508,8 @@ components:
- properties:
budget_tokens:
type: integer
+ display:
+ $ref: '#/components/schemas/AnthropicThinkingDisplay'
type:
enum:
- enabled
@@ -7492,6 +7527,8 @@ components:
- type
type: object
- properties:
+ display:
+ $ref: '#/components/schemas/AnthropicThinkingDisplay'
type:
enum:
- adaptive
@@ -7699,6 +7736,23 @@ components:
allOf:
- $ref: '#/components/schemas/BaseMessagesResult'
- properties:
+ context_management:
+ nullable: true
+ properties:
+ applied_edits:
+ items:
+ additionalProperties:
+ nullable: true
+ properties:
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ required:
+ - applied_edits
+ type: object
provider:
$ref: '#/components/schemas/ProviderName'
usage:
@@ -7835,6 +7889,7 @@ components:
- AionLabs
- Alibaba
- Ambient
+ - Baidu
- Amazon Bedrock
- Amazon Nova
- Anthropic
@@ -8861,6 +8916,8 @@ components:
service_tier:
nullable: true
type: string
+ text:
+ $ref: '#/components/schemas/TextExtendedConfig'
usage:
$ref: '#/components/schemas/Usage'
type: object
@@ -9248,6 +9305,7 @@ components:
example:
id: ig_tmp_abc123
imageUrl: https://example.com/image.png
+ result: https://example.com/image.png
status: completed
type: openrouter:image_generation
properties:
@@ -9257,6 +9315,10 @@ components:
type: string
imageUrl:
type: string
+ result:
+ description: The generated image as a base64-encoded string or URL, matching OpenAI image_generation_call format
+ nullable: true
+ type: string
revisedPrompt:
type: string
status:
@@ -9504,6 +9566,7 @@ components:
openrouter:browser_use: '#/components/schemas/OutputBrowserUseServerToolItem'
openrouter:code_interpreter: '#/components/schemas/OutputCodeInterpreterServerToolItem'
openrouter:datetime: '#/components/schemas/OutputDatetimeItem'
+ openrouter:experimental__search_models: '#/components/schemas/OutputSearchModelsServerToolItem'
openrouter:file_search: '#/components/schemas/OutputFileSearchServerToolItem'
openrouter:image_generation: '#/components/schemas/OutputImageGenerationServerToolItem'
openrouter:mcp: '#/components/schemas/OutputMcpServerToolItem'
@@ -9545,6 +9608,7 @@ components:
- $ref: '#/components/schemas/OutputToolSearchServerToolItem'
- $ref: '#/components/schemas/OutputMemoryServerToolItem'
- $ref: '#/components/schemas/OutputMcpServerToolItem'
+ - $ref: '#/components/schemas/OutputSearchModelsServerToolItem'
OutputItemWebSearchCall:
example:
action:
@@ -9788,6 +9852,32 @@ components:
- text: Analyzed the problem and found the optimal solution.
type: summary_text
type: reasoning
+ OutputSearchModelsServerToolItem:
+ description: An openrouter:experimental__search_models server tool output item
+ example:
+ arguments: '{"query":"Claude Opus"}'
+ id: sm_tmp_abc123
+ query: Claude Opus
+ status: completed
+ type: openrouter:experimental__search_models
+ properties:
+ arguments:
+ description: The JSON arguments submitted to the search tool (e.g. {"query":"Claude"})
+ type: string
+ id:
+ type: string
+ query:
+ type: string
+ status:
+ $ref: '#/components/schemas/ToolCallStatus'
+ type:
+ enum:
+ - openrouter:experimental__search_models
+ type: string
+ required:
+ - status
+ - type
+ type: object
OutputTextEditorServerToolItem:
description: An openrouter:text_editor server tool output item
example:
@@ -10228,6 +10318,7 @@ components:
- AionLabs
- Alibaba
- Ambient
+ - Baidu
- Amazon Bedrock
- Amazon Nova
- Anthropic
@@ -10533,6 +10624,7 @@ components:
- AionLabs
- Alibaba
- Ambient
+ - Baidu
- Amazon Bedrock
- Amazon Nova
- Anthropic
@@ -11985,7 +12077,18 @@ components:
TextExtendedConfig:
allOf:
- $ref: '#/components/schemas/TextConfig'
- - properties: {}
+ - properties:
+ verbosity:
+ enum:
+ - low
+ - medium
+ - high
+ - xhigh
+ - max
+ - null
+ nullable: true
+ type: string
+ x-speakeasy-unknown-values: allow
type: object
description: Text output configuration including format and verbosity
example:
@@ -12469,6 +12572,10 @@ components:
additionalProperties:
nullable: true
type: object
+ baidu:
+ additionalProperties:
+ nullable: true
+ type: object
baseten:
additionalProperties:
nullable: true
@@ -14855,6 +14962,11 @@ paths:
description: User-Agent header from the request
nullable: true
type: string
+ web_search_engine:
+ description: The resolved web search engine used for this generation (e.g. exa, firecrawl, parallel)
+ example: exa
+ nullable: true
+ type: string
required:
- id
- upstream_id
@@ -14882,6 +14994,7 @@ paths:
- num_input_audio_prompt
- num_media_completion
- num_search_results
+ - web_search_engine
- origin
- usage
- is_byok
diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock
index d4e0269..cfde0db 100644
--- a/.speakeasy/workflow.lock
+++ b/.speakeasy/workflow.lock
@@ -2,19 +2,20 @@ speakeasyVersion: 1.680.0
sources:
OpenRouter API:
sourceNamespace: open-router-chat-completions-api
- sourceRevisionDigest: sha256:3982892b2b8f82bfc2a26a0f6fcc9512b665a1051dc240a9cfa29001b9453aa9
- sourceBlobDigest: sha256:727d3c1f7d36ffbf94c57b565536d8a16804eaef4c4043dd6c98152fc1629c8f
+ sourceRevisionDigest: sha256:08722e0a4c690fe092b1524adfbddb388e84ff8e6e851ee0839f00bdad8dd1f1
+ sourceBlobDigest: sha256:f7157c6aea54b19cf7524859f989a386f16ecd42684f1463c5d2441b72fcad62
tags:
- latest
+ - speakeasy-sdk-regen-1776300046
- 1.0.0
targets:
open-router:
source: OpenRouter API
sourceNamespace: open-router-chat-completions-api
- sourceRevisionDigest: sha256:3982892b2b8f82bfc2a26a0f6fcc9512b665a1051dc240a9cfa29001b9453aa9
- sourceBlobDigest: sha256:727d3c1f7d36ffbf94c57b565536d8a16804eaef4c4043dd6c98152fc1629c8f
+ sourceRevisionDigest: sha256:08722e0a4c690fe092b1524adfbddb388e84ff8e6e851ee0839f00bdad8dd1f1
+ sourceBlobDigest: sha256:f7157c6aea54b19cf7524859f989a386f16ecd42684f1463c5d2441b72fcad62
codeSamplesNamespace: open-router-python-code-samples
- codeSamplesRevisionDigest: sha256:db86aed74d199f265e2e20442ef652dac0911c8a657ccb3e6614d56a26b8b44e
+ codeSamplesRevisionDigest: sha256:6cc9415478b37c7f494e2549a92dbd967f414010b493b75c18730d9e5a6e5c57
workflow:
workflowVersion: 1.0.0
speakeasyVersion: 1.680.0
diff --git a/README-PYPI.md b/README-PYPI.md
index 5f7e784..35f11f1 100644
--- a/README-PYPI.md
+++ b/README-PYPI.md
@@ -169,6 +169,36 @@ asyncio.run(main())
+
+## Pagination
+
+Some of the endpoints in this SDK support pagination. To use pagination, you make your SDK calls as usual, but the
+returned response object will have a `next` method that can be called to pull down the next group of results. If the
+return value of `next()` is `None`, then there are no more pages to be fetched.
+
+Here's an example of one such pagination call:
+```python
+from openrouter import OpenRouter
+import os
+
+
+with OpenRouter(
+ http_referer="",
+ x_open_router_title="",
+ x_open_router_categories="",
+ api_key=os.getenv("OPENROUTER_API_KEY", ""),
+) as open_router:
+
+ res = open_router.guardrails.list()
+
+ while res is not None:
+ # Handle items
+
+ res = res.next()
+
+```
+
+
## Resource Management
diff --git a/RELEASES.md b/RELEASES.md
index 0d0d01f..87d1de3 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -8,4 +8,14 @@ Based on:
### Generated
- [python v0.0.16] .
### Releases
-- [PyPI v0.0.16] https://pypi.org/project/openrouter/0.0.16 - .
\ No newline at end of file
+- [PyPI v0.0.16] https://pypi.org/project/openrouter/0.0.16 - .
+
+## 2026-04-18 00:34:54
+### Changes
+Based on:
+- OpenAPI Doc
+- Speakeasy CLI 1.680.0 (2.788.4) https://github.com/speakeasy-api/speakeasy
+### Generated
+- [python v0.9.2] .
+### Releases
+- [PyPI v0.9.2] https://pypi.org/project/openrouter/0.9.2 - .
\ No newline at end of file
diff --git a/docs/components/providername.md b/docs/components/providername.md
index 22effdd..1136a77 100644
--- a/docs/components/providername.md
+++ b/docs/components/providername.md
@@ -10,6 +10,7 @@
| `AION_LABS` | AionLabs |
| `ALIBABA` | Alibaba |
| `AMBIENT` | Ambient |
+| `BAIDU` | Baidu |
| `AMAZON_BEDROCK` | Amazon Bedrock |
| `AMAZON_NOVA` | Amazon Nova |
| `ANTHROPIC` | Anthropic |
diff --git a/docs/operations/getgenerationdata.md b/docs/operations/getgenerationdata.md
index 7be5766..183399a 100644
--- a/docs/operations/getgenerationdata.md
+++ b/docs/operations/getgenerationdata.md
@@ -5,43 +5,44 @@ Generation data
## Fields
-| Field | Type | Required | Description | Example |
-| --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- |
-| `api_type` | [Nullable[operations.APIType]](../operations/apitype.md) | :heavy_check_mark: | Type of API used for the generation | |
-| `app_id` | *Nullable[int]* | :heavy_check_mark: | ID of the app that made the request | 12345 |
-| `cache_discount` | *Nullable[float]* | :heavy_check_mark: | Discount applied due to caching | 0.0002 |
-| `cancelled` | *Nullable[bool]* | :heavy_check_mark: | Whether the generation was cancelled | false |
-| `created_at` | *str* | :heavy_check_mark: | ISO 8601 timestamp of when the generation was created | 2024-07-15T23:33:19.433273+00:00 |
-| `external_user` | *Nullable[str]* | :heavy_check_mark: | External user identifier | user-123 |
-| `finish_reason` | *Nullable[str]* | :heavy_check_mark: | Reason the generation finished | stop |
-| `generation_time` | *Nullable[float]* | :heavy_check_mark: | Time taken for generation in milliseconds | 1200 |
-| `http_referer` | *Nullable[str]* | :heavy_check_mark: | Referer header from the request | |
-| `id` | *str* | :heavy_check_mark: | Unique identifier for the generation | gen-3bhGkxlo4XFrqiabUM7NDtwDzWwG |
-| `is_byok` | *bool* | :heavy_check_mark: | Whether this used bring-your-own-key | false |
-| `latency` | *Nullable[float]* | :heavy_check_mark: | Total latency in milliseconds | 1250 |
-| `model` | *str* | :heavy_check_mark: | Model used for the generation | sao10k/l3-stheno-8b |
-| `moderation_latency` | *Nullable[float]* | :heavy_check_mark: | Moderation latency in milliseconds | 50 |
-| `native_finish_reason` | *Nullable[str]* | :heavy_check_mark: | Native finish reason as reported by provider | stop |
-| `native_tokens_cached` | *Nullable[int]* | :heavy_check_mark: | Native cached tokens as reported by provider | 3 |
-| `native_tokens_completion` | *Nullable[int]* | :heavy_check_mark: | Native completion tokens as reported by provider | 25 |
-| `native_tokens_completion_images` | *Nullable[int]* | :heavy_check_mark: | Native completion image tokens as reported by provider | 0 |
-| `native_tokens_prompt` | *Nullable[int]* | :heavy_check_mark: | Native prompt tokens as reported by provider | 10 |
-| `native_tokens_reasoning` | *Nullable[int]* | :heavy_check_mark: | Native reasoning tokens as reported by provider | 5 |
-| `num_input_audio_prompt` | *Nullable[int]* | :heavy_check_mark: | Number of audio inputs in the prompt | 0 |
-| `num_media_completion` | *Nullable[int]* | :heavy_check_mark: | Number of media items in the completion | 0 |
-| `num_media_prompt` | *Nullable[int]* | :heavy_check_mark: | Number of media items in the prompt | 1 |
-| `num_search_results` | *Nullable[int]* | :heavy_check_mark: | Number of search results included | 5 |
-| `origin` | *str* | :heavy_check_mark: | Origin URL of the request | https://openrouter.ai/ |
-| `provider_name` | *Nullable[str]* | :heavy_check_mark: | Name of the provider that served the request | Infermatic |
-| `provider_responses` | List[[components.ProviderResponse](../components/providerresponse.md)] | :heavy_check_mark: | List of provider responses for this generation, including fallback attempts | |
-| `request_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Unique identifier grouping all generations from a single API request | req-1727282430-aBcDeFgHiJkLmNoPqRsT |
-| `router` | *Nullable[str]* | :heavy_check_mark: | Router used for the request (e.g., openrouter/auto) | openrouter/auto |
-| `session_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Session identifier grouping multiple generations in the same session | |
-| `streamed` | *Nullable[bool]* | :heavy_check_mark: | Whether the response was streamed | true |
-| `tokens_completion` | *Nullable[int]* | :heavy_check_mark: | Number of tokens in the completion | 25 |
-| `tokens_prompt` | *Nullable[int]* | :heavy_check_mark: | Number of tokens in the prompt | 10 |
-| `total_cost` | *float* | :heavy_check_mark: | Total cost of the generation in USD | 0.0015 |
-| `upstream_id` | *Nullable[str]* | :heavy_check_mark: | Upstream provider's identifier for this generation | chatcmpl-791bcf62-080e-4568-87d0-94c72e3b4946 |
-| `upstream_inference_cost` | *Nullable[float]* | :heavy_check_mark: | Cost charged by the upstream provider | 0.0012 |
-| `usage` | *float* | :heavy_check_mark: | Usage amount in USD | 0.0015 |
-| `user_agent` | *Nullable[str]* | :heavy_check_mark: | User-Agent header from the request | |
\ No newline at end of file
+| Field | Type | Required | Description | Example |
+| --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
+| `api_type` | [Nullable[operations.APIType]](../operations/apitype.md) | :heavy_check_mark: | Type of API used for the generation | |
+| `app_id` | *Nullable[int]* | :heavy_check_mark: | ID of the app that made the request | 12345 |
+| `cache_discount` | *Nullable[float]* | :heavy_check_mark: | Discount applied due to caching | 0.0002 |
+| `cancelled` | *Nullable[bool]* | :heavy_check_mark: | Whether the generation was cancelled | false |
+| `created_at` | *str* | :heavy_check_mark: | ISO 8601 timestamp of when the generation was created | 2024-07-15T23:33:19.433273+00:00 |
+| `external_user` | *Nullable[str]* | :heavy_check_mark: | External user identifier | user-123 |
+| `finish_reason` | *Nullable[str]* | :heavy_check_mark: | Reason the generation finished | stop |
+| `generation_time` | *Nullable[float]* | :heavy_check_mark: | Time taken for generation in milliseconds | 1200 |
+| `http_referer` | *Nullable[str]* | :heavy_check_mark: | Referer header from the request | |
+| `id` | *str* | :heavy_check_mark: | Unique identifier for the generation | gen-3bhGkxlo4XFrqiabUM7NDtwDzWwG |
+| `is_byok` | *bool* | :heavy_check_mark: | Whether this used bring-your-own-key | false |
+| `latency` | *Nullable[float]* | :heavy_check_mark: | Total latency in milliseconds | 1250 |
+| `model` | *str* | :heavy_check_mark: | Model used for the generation | sao10k/l3-stheno-8b |
+| `moderation_latency` | *Nullable[float]* | :heavy_check_mark: | Moderation latency in milliseconds | 50 |
+| `native_finish_reason` | *Nullable[str]* | :heavy_check_mark: | Native finish reason as reported by provider | stop |
+| `native_tokens_cached` | *Nullable[int]* | :heavy_check_mark: | Native cached tokens as reported by provider | 3 |
+| `native_tokens_completion` | *Nullable[int]* | :heavy_check_mark: | Native completion tokens as reported by provider | 25 |
+| `native_tokens_completion_images` | *Nullable[int]* | :heavy_check_mark: | Native completion image tokens as reported by provider | 0 |
+| `native_tokens_prompt` | *Nullable[int]* | :heavy_check_mark: | Native prompt tokens as reported by provider | 10 |
+| `native_tokens_reasoning` | *Nullable[int]* | :heavy_check_mark: | Native reasoning tokens as reported by provider | 5 |
+| `num_input_audio_prompt` | *Nullable[int]* | :heavy_check_mark: | Number of audio inputs in the prompt | 0 |
+| `num_media_completion` | *Nullable[int]* | :heavy_check_mark: | Number of media items in the completion | 0 |
+| `num_media_prompt` | *Nullable[int]* | :heavy_check_mark: | Number of media items in the prompt | 1 |
+| `num_search_results` | *Nullable[int]* | :heavy_check_mark: | Number of search results included | 5 |
+| `origin` | *str* | :heavy_check_mark: | Origin URL of the request | https://openrouter.ai/ |
+| `provider_name` | *Nullable[str]* | :heavy_check_mark: | Name of the provider that served the request | Infermatic |
+| `provider_responses` | List[[components.ProviderResponse](../components/providerresponse.md)] | :heavy_check_mark: | List of provider responses for this generation, including fallback attempts | |
+| `request_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Unique identifier grouping all generations from a single API request | req-1727282430-aBcDeFgHiJkLmNoPqRsT |
+| `router` | *Nullable[str]* | :heavy_check_mark: | Router used for the request (e.g., openrouter/auto) | openrouter/auto |
+| `session_id` | *OptionalNullable[str]* | :heavy_minus_sign: | Session identifier grouping multiple generations in the same session | |
+| `streamed` | *Nullable[bool]* | :heavy_check_mark: | Whether the response was streamed | true |
+| `tokens_completion` | *Nullable[int]* | :heavy_check_mark: | Number of tokens in the completion | 25 |
+| `tokens_prompt` | *Nullable[int]* | :heavy_check_mark: | Number of tokens in the prompt | 10 |
+| `total_cost` | *float* | :heavy_check_mark: | Total cost of the generation in USD | 0.0015 |
+| `upstream_id` | *Nullable[str]* | :heavy_check_mark: | Upstream provider's identifier for this generation | chatcmpl-791bcf62-080e-4568-87d0-94c72e3b4946 |
+| `upstream_inference_cost` | *Nullable[float]* | :heavy_check_mark: | Cost charged by the upstream provider | 0.0012 |
+| `usage` | *float* | :heavy_check_mark: | Usage amount in USD | 0.0015 |
+| `user_agent` | *Nullable[str]* | :heavy_check_mark: | User-Agent header from the request | |
+| `web_search_engine` | *Nullable[str]* | :heavy_check_mark: | The resolved web search engine used for this generation (e.g. exa, firecrawl, parallel) | exa |
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index c678027..9e4606d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,9 +1,9 @@
[project]
name = "openrouter"
-version = "0.9.1"
+version = "0.9.2"
description = "Official Python Client SDK for OpenRouter."
authors = [{ name = "OpenRouter" },]
-readme = "README.md"
+readme = "README-PYPI.md"
requires-python = ">=3.9.2"
dependencies = [
"httpcore >=1.0.9",
@@ -11,6 +11,7 @@ dependencies = [
"jsonpath-python >=1.0.6",
"pydantic >=2.11.2",
]
+urls.repository = "https://github.com/OpenRouterTeam/python-sdk.git"
license = { text = "Apache-2.0" }
[dependency-groups]
diff --git a/scripts/publish.sh b/scripts/publish.sh
index ef28dc1..c35748f 100755
--- a/scripts/publish.sh
+++ b/scripts/publish.sh
@@ -1,4 +1,6 @@
#!/usr/bin/env bash
+uv run python scripts/prepare_readme.py
+
uv build
uv publish --token $PYPI_TOKEN
diff --git a/src/openrouter/_version.py b/src/openrouter/_version.py
index 4d43d33..fcebde3 100644
--- a/src/openrouter/_version.py
+++ b/src/openrouter/_version.py
@@ -3,10 +3,10 @@
import importlib.metadata
__title__: str = "openrouter"
-__version__: str = "0.9.1"
+__version__: str = "0.9.2"
__openapi_doc_version__: str = "1.0.0"
__gen_version__: str = "2.788.4"
-__user_agent__: str = "speakeasy-sdk/python 0.9.1 2.788.4 1.0.0 openrouter"
+__user_agent__: str = "speakeasy-sdk/python 0.9.2 2.788.4 1.0.0 openrouter"
try:
if __package__ is not None:
diff --git a/src/openrouter/components/__init__.py b/src/openrouter/components/__init__.py
index e41de29..cce159e 100644
--- a/src/openrouter/components/__init__.py
+++ b/src/openrouter/components/__init__.py
@@ -921,6 +921,11 @@
OutputReasoningItemType,
OutputReasoningItemTypedDict,
)
+ from .outputsearchmodelsservertoolitem import (
+ OutputSearchModelsServerToolItem,
+ OutputSearchModelsServerToolItemType,
+ OutputSearchModelsServerToolItemTypedDict,
+ )
from .outputtexteditorservertoolitem import (
Command,
OutputTextEditorServerToolItem,
@@ -1217,7 +1222,6 @@
StreamLogprobTopLogprob,
StreamLogprobTopLogprobTypedDict,
)
- from .textconfig import TextConfig, TextConfigTypedDict, TextConfigVerbosity
from .textdeltaevent import (
TextDeltaEvent,
TextDeltaEventType,
@@ -1227,7 +1231,7 @@
from .textextendedconfig import (
TextExtendedConfig,
TextExtendedConfigTypedDict,
- TextExtendedConfigVerbosity,
+ Verbosity,
)
from .toolcallstatus import ToolCallStatus
from .toolchoiceallowed import (
@@ -2058,6 +2062,9 @@
"OutputReasoningItemStatusUnionTypedDict",
"OutputReasoningItemType",
"OutputReasoningItemTypedDict",
+ "OutputSearchModelsServerToolItem",
+ "OutputSearchModelsServerToolItemType",
+ "OutputSearchModelsServerToolItemTypedDict",
"OutputTextEditorServerToolItem",
"OutputTextEditorServerToolItemType",
"OutputTextEditorServerToolItemTypedDict",
@@ -2271,9 +2278,6 @@
"SupportedResolution",
"SupportedSize",
"Syntax",
- "TextConfig",
- "TextConfigTypedDict",
- "TextConfigVerbosity",
"TextDeltaEvent",
"TextDeltaEventType",
"TextDeltaEventTypedDict",
@@ -2282,7 +2286,6 @@
"TextDoneEventTypedDict",
"TextExtendedConfig",
"TextExtendedConfigTypedDict",
- "TextExtendedConfigVerbosity",
"Tokenizer",
"TooManyRequestsResponseErrorData",
"TooManyRequestsResponseErrorDataTypedDict",
@@ -2326,6 +2329,7 @@
"Value2TypedDict",
"Variables",
"VariablesTypedDict",
+ "Verbosity",
"VideoGenerationRequest",
"VideoGenerationRequestTypedDict",
"VideoGenerationResponse",
@@ -3074,6 +3078,9 @@
"OutputReasoningItemStatusUnionTypedDict": ".outputreasoningitem",
"OutputReasoningItemType": ".outputreasoningitem",
"OutputReasoningItemTypedDict": ".outputreasoningitem",
+ "OutputSearchModelsServerToolItem": ".outputsearchmodelsservertoolitem",
+ "OutputSearchModelsServerToolItemType": ".outputsearchmodelsservertoolitem",
+ "OutputSearchModelsServerToolItemTypedDict": ".outputsearchmodelsservertoolitem",
"Command": ".outputtexteditorservertoolitem",
"OutputTextEditorServerToolItem": ".outputtexteditorservertoolitem",
"OutputTextEditorServerToolItemType": ".outputtexteditorservertoolitem",
@@ -3281,9 +3288,6 @@
"StreamLogprobTypedDict": ".streamlogprob",
"StreamLogprobTopLogprob": ".streamlogprobtoplogprob",
"StreamLogprobTopLogprobTypedDict": ".streamlogprobtoplogprob",
- "TextConfig": ".textconfig",
- "TextConfigTypedDict": ".textconfig",
- "TextConfigVerbosity": ".textconfig",
"TextDeltaEvent": ".textdeltaevent",
"TextDeltaEventType": ".textdeltaevent",
"TextDeltaEventTypedDict": ".textdeltaevent",
@@ -3292,7 +3296,7 @@
"TextDoneEventTypedDict": ".textdoneevent",
"TextExtendedConfig": ".textextendedconfig",
"TextExtendedConfigTypedDict": ".textextendedconfig",
- "TextExtendedConfigVerbosity": ".textextendedconfig",
+ "Verbosity": ".textextendedconfig",
"ToolCallStatus": ".toolcallstatus",
"Mode": ".toolchoiceallowed",
"ModeAuto": ".toolchoiceallowed",
diff --git a/src/openrouter/components/imagegenerationservertoolconfig.py b/src/openrouter/components/imagegenerationservertoolconfig.py
index 7b7010a..0fdc8f6 100644
--- a/src/openrouter/components/imagegenerationservertoolconfig.py
+++ b/src/openrouter/components/imagegenerationservertoolconfig.py
@@ -13,7 +13,7 @@ class ImageGenerationServerToolConfigTypedDict(TypedDict):
r"""Configuration for the openrouter:image_generation server tool. Accepts all image_config params (aspect_ratio, quality, size, background, output_format, output_compression, moderation, etc.) plus a model field."""
model: NotRequired[str]
- r"""Which image generation model to use (e.g. \"openai/gpt-image-1\"). Defaults to \"openai/gpt-image-1\"."""
+ r"""Which image generation model to use (e.g. \"openai/gpt-5-image\"). Defaults to \"openai/gpt-5-image\"."""
class ImageGenerationServerToolConfig(BaseModel):
@@ -27,7 +27,7 @@ class ImageGenerationServerToolConfig(BaseModel):
)
model: Optional[str] = None
- r"""Which image generation model to use (e.g. \"openai/gpt-image-1\"). Defaults to \"openai/gpt-image-1\"."""
+ r"""Which image generation model to use (e.g. \"openai/gpt-5-image\"). Defaults to \"openai/gpt-5-image\"."""
@property
def additional_properties(self):
diff --git a/src/openrouter/components/openresponsesresult.py b/src/openrouter/components/openresponsesresult.py
index fa04955..ea63258 100644
--- a/src/openrouter/components/openresponsesresult.py
+++ b/src/openrouter/components/openresponsesresult.py
@@ -39,7 +39,7 @@
from .responseserrorfield import ResponsesErrorField, ResponsesErrorFieldTypedDict
from .shellservertool import ShellServerTool, ShellServerToolTypedDict
from .storedprompttemplate import StoredPromptTemplate, StoredPromptTemplateTypedDict
-from .textconfig import TextConfig, TextConfigTypedDict
+from .textextendedconfig import TextExtendedConfig, TextExtendedConfigTypedDict
from .truncation import Truncation
from .usage import Usage, UsageTypedDict
from .websearchservertool import WebSearchServerTool, WebSearchServerToolTypedDict
@@ -195,7 +195,7 @@ class OpenResponsesResultTypedDict(TypedDict):
safety_identifier: NotRequired[Nullable[str]]
service_tier: NotRequired[Nullable[str]]
store: NotRequired[bool]
- text: NotRequired[TextConfigTypedDict]
+ text: NotRequired[TextExtendedConfigTypedDict]
r"""Text output configuration including format and verbosity"""
top_logprobs: NotRequired[int]
truncation: NotRequired[Nullable[Truncation]]
@@ -269,7 +269,7 @@ class OpenResponsesResult(BaseModel):
store: Optional[bool] = None
- text: Optional[TextConfig] = None
+ text: Optional[TextExtendedConfig] = None
r"""Text output configuration including format and verbosity"""
top_logprobs: Optional[int] = None
diff --git a/src/openrouter/components/outputimagegenerationservertoolitem.py b/src/openrouter/components/outputimagegenerationservertoolitem.py
index e91756b..a94bd09 100644
--- a/src/openrouter/components/outputimagegenerationservertoolitem.py
+++ b/src/openrouter/components/outputimagegenerationservertoolitem.py
@@ -2,9 +2,16 @@
from __future__ import annotations
from .toolcallstatus import ToolCallStatus
-from openrouter.types import BaseModel
+from openrouter.types import (
+ BaseModel,
+ Nullable,
+ OptionalNullable,
+ UNSET,
+ UNSET_SENTINEL,
+)
from openrouter.utils import validate_open_enum
import pydantic
+from pydantic import model_serializer
from pydantic.functional_validators import PlainValidator
from typing import Literal, Optional
from typing_extensions import Annotated, NotRequired, TypedDict
@@ -21,6 +28,8 @@ class OutputImageGenerationServerToolItemTypedDict(TypedDict):
id: NotRequired[str]
image_b64: NotRequired[str]
image_url: NotRequired[str]
+ result: NotRequired[Nullable[str]]
+ r"""The generated image as a base64-encoded string or URL, matching OpenAI image_generation_call format"""
revised_prompt: NotRequired[str]
@@ -37,6 +46,39 @@ class OutputImageGenerationServerToolItem(BaseModel):
image_url: Annotated[Optional[str], pydantic.Field(alias="imageUrl")] = None
+ result: OptionalNullable[str] = UNSET
+ r"""The generated image as a base64-encoded string or URL, matching OpenAI image_generation_call format"""
+
revised_prompt: Annotated[Optional[str], pydantic.Field(alias="revisedPrompt")] = (
None
)
+
+ @model_serializer(mode="wrap")
+ def serialize_model(self, handler):
+ optional_fields = ["id", "imageB64", "imageUrl", "result", "revisedPrompt"]
+ nullable_fields = ["result"]
+ null_default_fields = []
+
+ serialized = handler(self)
+
+ m = {}
+
+ for n, f in type(self).model_fields.items():
+ k = f.alias or n
+ val = serialized.get(k)
+ serialized.pop(k, None)
+
+ optional_nullable = k in optional_fields and k in nullable_fields
+ is_set = (
+ self.__pydantic_fields_set__.intersection({n})
+ or k in null_default_fields
+ ) # pylint: disable=no-member
+
+ if val is not None and val != UNSET_SENTINEL:
+ m[k] = val
+ elif val != UNSET_SENTINEL and (
+ not k in optional_fields or (optional_nullable and is_set)
+ ):
+ m[k] = val
+
+ return m
diff --git a/src/openrouter/components/outputitems.py b/src/openrouter/components/outputitems.py
index 4457f80..75ffbc5 100644
--- a/src/openrouter/components/outputitems.py
+++ b/src/openrouter/components/outputitems.py
@@ -56,6 +56,10 @@
)
from .outputmessageitem import OutputMessageItem, OutputMessageItemTypedDict
from .outputreasoningitem import OutputReasoningItem, OutputReasoningItemTypedDict
+from .outputsearchmodelsservertoolitem import (
+ OutputSearchModelsServerToolItem,
+ OutputSearchModelsServerToolItemTypedDict,
+)
from .outputtexteditorservertoolitem import (
OutputTextEditorServerToolItem,
OutputTextEditorServerToolItemTypedDict,
@@ -94,18 +98,19 @@
OutputTextEditorServerToolItemTypedDict,
OutputApplyPatchServerToolItemTypedDict,
OutputDatetimeItemTypedDict,
+ OutputSearchModelsServerToolItemTypedDict,
OutputMcpServerToolItemTypedDict,
OutputBrowserUseServerToolItemTypedDict,
OutputFunctionCallItemTypedDict,
- OutputImageGenerationServerToolItemTypedDict,
OutputMessageItemTypedDict,
OutputComputerCallItemTypedDict,
OutputWebFetchServerToolItemTypedDict,
OutputMemoryServerToolItemTypedDict,
OutputCodeInterpreterCallItemTypedDict,
+ OutputImageGenerationServerToolItemTypedDict,
OutputBashServerToolItemTypedDict,
- OutputCodeInterpreterServerToolItemTypedDict,
OutputReasoningItemTypedDict,
+ OutputCodeInterpreterServerToolItemTypedDict,
],
)
r"""An output item from the response"""
@@ -126,6 +131,10 @@
OutputCodeInterpreterServerToolItem, Tag("openrouter:code_interpreter")
],
Annotated[OutputDatetimeItem, Tag("openrouter:datetime")],
+ Annotated[
+ OutputSearchModelsServerToolItem,
+ Tag("openrouter:experimental__search_models"),
+ ],
Annotated[OutputFileSearchServerToolItem, Tag("openrouter:file_search")],
Annotated[
OutputImageGenerationServerToolItem, Tag("openrouter:image_generation")
diff --git a/src/openrouter/components/outputsearchmodelsservertoolitem.py b/src/openrouter/components/outputsearchmodelsservertoolitem.py
new file mode 100644
index 0000000..f94949d
--- /dev/null
+++ b/src/openrouter/components/outputsearchmodelsservertoolitem.py
@@ -0,0 +1,40 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .toolcallstatus import ToolCallStatus
+from openrouter.types import BaseModel
+from openrouter.utils import validate_open_enum
+from pydantic.functional_validators import PlainValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+OutputSearchModelsServerToolItemType = Literal[
+ "openrouter:experimental__search_models",
+]
+
+
+class OutputSearchModelsServerToolItemTypedDict(TypedDict):
+ r"""An openrouter:experimental__search_models server tool output item"""
+
+ status: ToolCallStatus
+ type: OutputSearchModelsServerToolItemType
+ arguments: NotRequired[str]
+ r"""The JSON arguments submitted to the search tool (e.g. {\"query\":\"Claude\"})"""
+ id: NotRequired[str]
+ query: NotRequired[str]
+
+
+class OutputSearchModelsServerToolItem(BaseModel):
+ r"""An openrouter:experimental__search_models server tool output item"""
+
+ status: Annotated[ToolCallStatus, PlainValidator(validate_open_enum(False))]
+
+ type: OutputSearchModelsServerToolItemType
+
+ arguments: Optional[str] = None
+ r"""The JSON arguments submitted to the search tool (e.g. {\"query\":\"Claude\"})"""
+
+ id: Optional[str] = None
+
+ query: Optional[str] = None
diff --git a/src/openrouter/components/providername.py b/src/openrouter/components/providername.py
index 7764a12..109379a 100644
--- a/src/openrouter/components/providername.py
+++ b/src/openrouter/components/providername.py
@@ -12,6 +12,7 @@
"AionLabs",
"Alibaba",
"Ambient",
+ "Baidu",
"Amazon Bedrock",
"Amazon Nova",
"Anthropic",
diff --git a/src/openrouter/components/providerresponse.py b/src/openrouter/components/providerresponse.py
index 4856eee..14f4f7e 100644
--- a/src/openrouter/components/providerresponse.py
+++ b/src/openrouter/components/providerresponse.py
@@ -44,6 +44,7 @@
"AionLabs",
"Alibaba",
"Ambient",
+ "Baidu",
"Amazon Bedrock",
"Amazon Nova",
"Anthropic",
diff --git a/src/openrouter/components/textconfig.py b/src/openrouter/components/textconfig.py
deleted file mode 100644
index cf8fa53..0000000
--- a/src/openrouter/components/textconfig.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .formats import Formats, FormatsTypedDict
-from openrouter.types import (
- BaseModel,
- Nullable,
- OptionalNullable,
- UNSET,
- UNSET_SENTINEL,
- UnrecognizedStr,
-)
-from openrouter.utils import validate_open_enum
-import pydantic
-from pydantic import model_serializer
-from pydantic.functional_validators import PlainValidator
-from typing import Literal, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypedDict
-
-
-TextConfigVerbosity = Union[
- Literal[
- "high",
- "low",
- "medium",
- ],
- UnrecognizedStr,
-]
-
-
-class TextConfigTypedDict(TypedDict):
- r"""Text output configuration including format and verbosity"""
-
- format_: NotRequired[FormatsTypedDict]
- r"""Text response format configuration"""
- verbosity: NotRequired[Nullable[TextConfigVerbosity]]
-
-
-class TextConfig(BaseModel):
- r"""Text output configuration including format and verbosity"""
-
- format_: Annotated[Optional[Formats], pydantic.Field(alias="format")] = None
- r"""Text response format configuration"""
-
- verbosity: Annotated[
- OptionalNullable[TextConfigVerbosity], PlainValidator(validate_open_enum(False))
- ] = UNSET
-
- @model_serializer(mode="wrap")
- def serialize_model(self, handler):
- optional_fields = ["format", "verbosity"]
- nullable_fields = ["verbosity"]
- null_default_fields = []
-
- serialized = handler(self)
-
- m = {}
-
- for n, f in type(self).model_fields.items():
- k = f.alias or n
- val = serialized.get(k)
- serialized.pop(k, None)
-
- optional_nullable = k in optional_fields and k in nullable_fields
- is_set = (
- self.__pydantic_fields_set__.intersection({n})
- or k in null_default_fields
- ) # pylint: disable=no-member
-
- if val is not None and val != UNSET_SENTINEL:
- m[k] = val
- elif val != UNSET_SENTINEL and (
- not k in optional_fields or (optional_nullable and is_set)
- ):
- m[k] = val
-
- return m
diff --git a/src/openrouter/components/textextendedconfig.py b/src/openrouter/components/textextendedconfig.py
index ee561b1..89224c7 100644
--- a/src/openrouter/components/textextendedconfig.py
+++ b/src/openrouter/components/textextendedconfig.py
@@ -18,11 +18,13 @@
from typing_extensions import Annotated, NotRequired, TypedDict
-TextExtendedConfigVerbosity = Union[
+Verbosity = Union[
Literal[
- "high",
"low",
"medium",
+ "high",
+ "xhigh",
+ "max",
],
UnrecognizedStr,
]
@@ -33,7 +35,7 @@ class TextExtendedConfigTypedDict(TypedDict):
format_: NotRequired[FormatsTypedDict]
r"""Text response format configuration"""
- verbosity: NotRequired[Nullable[TextExtendedConfigVerbosity]]
+ verbosity: NotRequired[Nullable[Verbosity]]
class TextExtendedConfig(BaseModel):
@@ -43,8 +45,7 @@ class TextExtendedConfig(BaseModel):
r"""Text response format configuration"""
verbosity: Annotated[
- OptionalNullable[TextExtendedConfigVerbosity],
- PlainValidator(validate_open_enum(False)),
+ OptionalNullable[Verbosity], PlainValidator(validate_open_enum(False))
] = UNSET
@model_serializer(mode="wrap")
diff --git a/src/openrouter/components/videogenerationrequest.py b/src/openrouter/components/videogenerationrequest.py
index db54f12..4efa2a0 100644
--- a/src/openrouter/components/videogenerationrequest.py
+++ b/src/openrouter/components/videogenerationrequest.py
@@ -44,6 +44,7 @@ class OptionsTypedDict(TypedDict):
atoma: NotRequired[Dict[str, Nullable[Any]]]
avian: NotRequired[Dict[str, Nullable[Any]]]
azure: NotRequired[Dict[str, Nullable[Any]]]
+ baidu: NotRequired[Dict[str, Nullable[Any]]]
baseten: NotRequired[Dict[str, Nullable[Any]]]
black_forest_labs: NotRequired[Dict[str, Nullable[Any]]]
byteplus: NotRequired[Dict[str, Nullable[Any]]]
@@ -182,6 +183,8 @@ class Options(BaseModel):
azure: Optional[Dict[str, Nullable[Any]]] = None
+ baidu: Optional[Dict[str, Nullable[Any]]] = None
+
baseten: Optional[Dict[str, Nullable[Any]]] = None
black_forest_labs: Annotated[
diff --git a/src/openrouter/operations/getgeneration.py b/src/openrouter/operations/getgeneration.py
index 94dfcb7..2433856 100644
--- a/src/openrouter/operations/getgeneration.py
+++ b/src/openrouter/operations/getgeneration.py
@@ -211,6 +211,8 @@ class GetGenerationDataTypedDict(TypedDict):
r"""Usage amount in USD"""
user_agent: Nullable[str]
r"""User-Agent header from the request"""
+ web_search_engine: Nullable[str]
+ r"""The resolved web search engine used for this generation (e.g. exa, firecrawl, parallel)"""
request_id: NotRequired[Nullable[str]]
r"""Unique identifier grouping all generations from a single API request"""
session_id: NotRequired[Nullable[str]]
@@ -328,6 +330,9 @@ class GetGenerationData(BaseModel):
user_agent: Nullable[str]
r"""User-Agent header from the request"""
+ web_search_engine: Nullable[str]
+ r"""The resolved web search engine used for this generation (e.g. exa, firecrawl, parallel)"""
+
request_id: OptionalNullable[str] = UNSET
r"""Unique identifier grouping all generations from a single API request"""
@@ -369,6 +374,7 @@ def serialize_model(self, handler):
"upstream_id",
"upstream_inference_cost",
"user_agent",
+ "web_search_engine",
]
null_default_fields = []
diff --git a/uv.lock b/uv.lock
index f7af3d4..72f33e0 100644
--- a/uv.lock
+++ b/uv.lock
@@ -220,7 +220,7 @@ wheels = [
[[package]]
name = "openrouter"
-version = "0.9.1"
+version = "0.9.2"
source = { editable = "." }
dependencies = [
{ name = "httpcore" },