Get all expiries and contracts
import os
import httpx
import polars as pl
from rich import print as rprint
uw_token = os.getenv('UW_TOKEN') # Set this to your own token 'abc123etc'
headers = {'Accept': 'application/json, text/plain', 'Authorization': uw_token}
We are tasked with collecting all contracts across the entire options montage for a heavily traded name, MSTR in this example. Our first step is collecting all expiries via the stock/Volume & OI per Expiry endpoint:
https://api.unusualwhales.com/docs#/operations/PublicApi.TickerController.vol_oi_per_expiry
ticker = 'MSTR'
vol_oi_url = f'https://api.unusualwhales.com/api/stock/{ticker}/option/volume-oi-expiry'
vol_oi_rsp = httpx.get(vol_oi_url, headers=headers)
vol_oi_rsp.status_code
200
200, success! If we were to call vol_oi_rsp.json() we would get the following data:
>>> vol_oi_rsp.json()
{
    'data': [
        {'expires': '2025-01-31', 'oi': 438322, 'volume': 329297},
        {'expires': '2025-02-07', 'oi': 60345, 'volume': 44701},
        ...
        {'expires': '2027-06-17', 'oi': 0, 'volume': 293}
    ]
}
And for the time being we are only interested in the expiration dates, so we will collect those strings into a list:
expiry_dates = [d['expires'] for d in vol_oi_rsp.json()['data']]
rprint(expiry_dates)
[ '2025-01-31', '2025-02-07', '2025-02-14', '2025-02-21', '2025-02-28', '2025-03-07', '2025-03-21', '2025-04-17', '2025-06-20', '2025-07-18', '2025-09-19', '2025-12-19', '2026-01-16', '2026-06-18', '2026-12-18', '2027-01-15', '2027-06-17' ]
Nice, our expiry_dates list now contains:
>>> expiry_dates
['2025-01-31', '2025-02-07', ... '2027-06-17']
We can use these values to loop through the option-contract/Option contracts endpoint, collecting observations as we go and paginating through responses until an empty JSON response occurs, before moving on to the next expiry:
https://api.unusualwhales.com/docs#/operations/PublicApi.OptionContractController.option_contracts
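Before writing the full loops, here is a minimal sketch of that paginate-until-empty pattern as a standalone generator. The helper name iter_pages is mine (not part of the API or any library), and the loops below simply inline the same logic:
import httpx

def iter_pages(url: str, headers: dict, params: dict):
    """Yield (page_number, data) until the API returns an empty 'data' list."""
    page = 0
    while True:
        rsp = httpx.get(url, headers=headers, params={**params, 'page': page})
        data = rsp.json()['data']
        if not data:  # empty list means no more pages for these params
            break
        yield page, data
        page += 1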
I want to add the convenient YYYY-MM-DD date to each observation, which we could do by parsing it out of the option_symbol field, but since I intend to plot this data (eventually) I will instead create polars DataFrames and add my convenience columns there.
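If you do want the date straight from the symbol, the OCC format (root + YYMMDD + C/P + strike × 1000) makes the parsing mechanical. A small sketch, with a helper name of my own choosing:
from datetime import datetime

def expiry_from_symbol(option_symbol: str) -> str:
    # 'MSTR250131P00340000' -> drop the 9-char type+strike suffix, keep the YYMMDD
    yymmdd = option_symbol[:-9][-6:]
    return datetime.strptime(yymmdd, '%y%m%d').strftime('%Y-%m-%d')

expiry_from_symbol('MSTR250131P00340000')  # '2025-01-31'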
We already know what the response looks like from this endpoint:
{
    "data": [
        {
            "ask_volume": 56916,
            "avg_price": "0.77927817593516586531",
            "bid_volume": 68967,
            "floor_volume": 1815,
            "high_price": "5.75",
            "implied_volatility": "0.542805337797143",
            "last_price": "0.01",
            "low_price": "0.01",
            "mid_volume": 6393,
            "multi_leg_volume": 9871,
            "nbbo_ask": "0.01",
            "nbbo_bid": "0",
            "no_side_volume": 6393,
            "open_interest": 22868,
            "option_symbol": "AAPL240202P00185000",
            "prev_oi": 20217,
            "stock_multi_leg_volume": 13,
            "sweep_volume": 12893,
            "total_premium": "10307980.00",
            "volume": 132276
        },
        ...
    ]
}
Cool, but maybe we want to aggregate in some other interesting ways, so let's add the expiry, page number (results are paginated, as you will see in the while loop), strike price, and option type (call or put).
First, as an exercise, let's knock this out by interacting only with dictionaries, then let's complete this task using DataFrames.
######################
# Using dictionaries #
######################
rsp_data = []
oc_url = f'https://api.unusualwhales.com/api/stock/{ticker}/option-contracts'
for expiry in expiry_dates:
    page_counter = 0
    while True:
        oc_params = {'expiry': expiry, 'page': page_counter}
        oc_rsp = httpx.get(oc_url, headers=headers, params=oc_params)
        oc_data = oc_rsp.json()['data']
        if len(oc_data) == 0:
            break
        # Add to individual dictionaries
        for d in oc_data:
            option_symbol = d['option_symbol']
            option_strike = float(option_symbol[-8:]) / 1000
            option_type = option_symbol[-9]
            d['expiry'] = expiry
            d['page'] = page_counter
            d['option_strike'] = option_strike
            d['option_type'] = option_type
            rsp_data.append(d)
        page_counter += 1
rprint(rsp_data[0])
rprint(f'Contract total: {len(rsp_data)}')
{
    'ask_volume': 2329,
    'avg_price': '11.472617262797172842150353388',
    'bid_volume': 2245,
    'floor_volume': 20,
    'high_price': '14.75',
    'implied_volatility': '0.935893772235936',
    'last_price': '13.50',
    'low_price': '9.00',
    'mid_volume': 95,
    'multi_leg_volume': 199,
    'nbbo_ask': '13.75',
    'nbbo_bid': '13.35',
    'no_side_volume': 0,
    'open_interest': 3443,
    'option_symbol': 'MSTR250131P00340000',
    'prev_oi': 3124,
    'stock_multi_leg_volume': 0,
    'sweep_volume': 515,
    'total_premium': '5356565.00',
    'volume': 4669,
    'expiry': '2025-01-31',
    'page': 0,
    'option_strike': 340.0,
    'option_type': 'P'
}
Contract total: 5626
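Since every dictionary now carries the convenience keys, quick aggregations are easy even without a DataFrame. For example (illustrative only), total volume per expiry:
from collections import defaultdict

# Sum contract volume per expiry across the collected dictionaries
volume_by_expiry = defaultdict(int)
for d in rsp_data:
    volume_by_expiry[d['expiry']] += d['volume']
rprint(dict(volume_by_expiry))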
Now let's repeat using DataFrames:
###########################
# Using polars DataFrames #
###########################
rsp_dfs = []
oc_url = f'https://api.unusualwhales.com/api/stock/{ticker}/option-contracts'
for expiry in expiry_dates:
    page_counter = 0
    while True:
        oc_params = {'expiry': expiry, 'page': page_counter}
        oc_rsp = httpx.get(oc_url, headers=headers, params=oc_params)
        oc_data = oc_rsp.json()['data']
        if len(oc_data) == 0:
            break
        # Construct dataframe
        raw_df = pl.DataFrame(oc_data)
        df = (
            raw_df
            .with_columns(
                pl.lit(expiry).alias('expiry'),
                pl.lit(page_counter).alias('page')
            )
            .with_columns(
                pl.col('option_symbol').str.slice(-9, 1).alias('option_type'),
                ((pl.col('option_symbol').str.slice(-8).cast(pl.Float64)) / 1000).alias('strike_price'),
            )
            .sort(['strike_price', 'option_type'], descending=[False, False])
        )
        rsp_dfs.append(df)
        page_counter += 1
raw_df = pl.concat(rsp_dfs)
raw_df
ask_volume | avg_price | bid_volume | floor_volume | high_price | implied_volatility | last_price | low_price | mid_volume | multi_leg_volume | nbbo_ask | nbbo_bid | no_side_volume | open_interest | option_symbol | prev_oi | stock_multi_leg_volume | sweep_volume | total_premium | volume | expiry | page | option_type | strike_price |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
i64 | str | i64 | i64 | str | str | str | str | i64 | i64 | str | str | i64 | i64 | str | i64 | i64 | i64 | str | i64 | str | i32 | str | f64 |
0 | null | 0 | 0 | null | "6.385370372259… | null | null | 0 | 0 | null | null | 0 | 17 | "MSTR250131C000… | 2 | 0 | 0 | "0" | 0 | "2025-01-31" | 0 | "C" | 20.0 |
0 | null | 0 | 0 | null | "4.691617583615… | null | null | 0 | 0 | null | null | 0 | 0 | "MSTR250131P000… | 0 | 0 | 0 | "0" | 0 | "2025-01-31" | 0 | "P" | 20.0 |
0 | null | 0 | 0 | null | "6.385370372259… | null | null | 0 | 0 | null | null | 0 | 6 | "MSTR250131C000… | 4 | 0 | 0 | "0" | 0 | "2025-01-31" | 0 | "C" | 25.0 |
0 | null | 0 | 0 | null | "4.691617583615… | null | null | 0 | 0 | null | null | 0 | 0 | "MSTR250131P000… | 0 | 0 | 0 | "0" | 0 | "2025-01-31" | 0 | "P" | 25.0 |
0 | null | 0 | 0 | null | "6.385370372259… | null | null | 0 | 0 | null | null | 0 | 6 | "MSTR250131C000… | 5 | 0 | 0 | "0" | 0 | "2025-01-31" | 0 | "C" | 30.0 |
… | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … |
0 | null | 0 | 0 | null | "0.660327971872… | null | null | 0 | 0 | null | null | 0 | 0 | "MSTR270617P007… | 0 | 0 | 0 | "0" | 0 | "2027-06-17" | 0 | "P" | 720.0 |
0 | null | 0 | 0 | null | "0.889429094637… | null | null | 0 | 0 | null | null | 0 | 0 | "MSTR270617C007… | 0 | 0 | 0 | "0" | 0 | "2027-06-17" | 0 | "C" | 730.0 |
0 | null | 0 | 0 | null | "0.658294119234… | null | null | 0 | 0 | null | null | 0 | 0 | "MSTR270617P007… | 0 | 0 | 0 | "0" | 0 | "2027-06-17" | 0 | "P" | 730.0 |
0 | "108.20" | 4 | 0 | "108.20" | "0.890148039909… | "108.20" | "108.20" | 0 | 0 | "112.65" | "102.70" | 0 | 30 | "MSTR270617C007… | 0 | 0 | 0 | "43280.00" | 4 | "2027-06-17" | 0 | "C" | 740.0 |
0 | null | 0 | 0 | null | "0.656124167198… | null | null | 0 | 0 | null | null | 0 | 0 | "MSTR270617P007… | 0 | 0 | 0 | "0" | 0 | "2027-06-17" | 0 | "P" | 740.0 |
Nice! Always good to see that the results match, with 5626 dictionaries in our list vs. 5626 rows in our DataFrame.
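One wrinkle to keep in mind before aggregating or plotting: the API returns several numeric-looking fields (avg_price, implied_volatility, total_premium, and so on) as strings, so they land in the DataFrame with a str dtype. A minimal casting sketch, with a column list of my own choosing:
# Cast the string-typed price/volatility columns to floats (nulls stay null)
price_cols = [
    'avg_price', 'high_price', 'low_price', 'last_price',
    'implied_volatility', 'nbbo_ask', 'nbbo_bid', 'total_premium',
]
raw_df = raw_df.with_columns(pl.col(price_cols).cast(pl.Float64))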
From here, depending on your preference, you can move ahead with your plotting tasks (I would probably melt the polars DataFrame and plot a line for each expiration, but that's me).
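For completeness, here is roughly what that could look like; a minimal matplotlib sketch (the library and column choices are mine), plotting call volume against strike with one line per expiry:
import matplotlib.pyplot as plt

# One line per expiry: call volume as a function of strike
calls = raw_df.filter(pl.col('option_type') == 'C')

fig, ax = plt.subplots()
for expiry in calls['expiry'].unique().sort():
    grp = calls.filter(pl.col('expiry') == expiry).sort('strike_price')
    ax.plot(grp['strike_price'].to_numpy(), grp['volume'].to_numpy(), label=expiry)
ax.set_xlabel('Strike price')
ax.set_ylabel('Call volume')
ax.legend(fontsize='x-small')
plt.show()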