Replicate the Top Net Impact Chart (v2)
# httpx for the API calls, polars for tabular wrangling, lets-plot for charting.
import os
import httpx
import polars as pl
from lets_plot import *
LetsPlot.setup_html()  # enable inline HTML rendering of lets-plot charts
uw_token = os.getenv('UW_TOKEN') # Set this to your own token 'abc123etc'
# NOTE(review): if UW_TOKEN is unset, uw_token is None and the Authorization
# header will be invalid — every request below will then fail authentication.
headers = {'Accept': 'application/json, text/plain', 'Authorization': uw_token}
While the Unusual Whales API does not explicitly offer a "Top Net Impact Chart" endpoint, it can be replicated using the screener/Stock Screener endpoint:
https://api.unusualwhales.com/docs#/operations/PublicApi.ScreenerController.stock_screener
(Personal note from Dan: this endpoint is criminally underused, it is the swiss army knife of filtering.)
It takes a lot of scrolling, but it's worth it; make your way
all the way down to the order
query param and you
will see what I mean.
Let's start by collecting tickers with the most positive net premiums:
# Stock screener endpoint: ordering by net_premium descending surfaces the
# tickers with the most positive net premium first.
# (Was an f-string with no placeholders — a plain literal is correct here.)
stock_screener_url = 'https://api.unusualwhales.com/api/screener/stocks'
positive_stock_screener_params = {
    'order': 'net_premium',
    'order_direction': 'desc'
}
positive_stock_screener_rsp = httpx.get(
    stock_screener_url,
    headers=headers,
    params=positive_stock_screener_params
)
# Echo the HTTP status code (200 expected).
positive_stock_screener_rsp.status_code
200
200 (success)! The response data is quite rich so I am going to show a single dictionary:
>>> positive_stock_screener_rsp.json()
{'data': [{'realized_volatility': '0.563447697840083050324876454521547081094166016343785413',
'prev_put_oi': 2996216,
'implied_move_perc': '0.01089659925315411',
'sector': 'Consumer Cyclical',
'er_time': 'unknown',
'iv30d_1m': '0.738680259214912',
'gex_ratio': '0.65156405255265214224',
'call_premium': '1080373705.0000',
'call_volume_bid_side': 478486,
'gex_perc_change': '-0.20913732383544494528',
'iv_rank': '36.73131555016125943100',
'put_volume_ask_side': 348552,
'is_index': False,
'put_volume_bid_side': 382649,
'ticker': 'TSLA',
'prev_call_oi': 3381353,
'issue_type': 'Common Stock',
'week_52_high': '488.5399',
'put_open_interest': 3121548,
'total_open_interest': 6663500,
'net_put_premium': '-28918236.0000',
'iv30d_1d': '0.550519033360857',
'put_premium': '415992376.0000',
'avg_30_day_put_oi': '3787810.733333333333',
'put_call_ratio': '0.73449647482806202411',
'call_volume_ask_side': 532392,
'bullish_premium': '769269819.0000',
'prev_put_volume': 1253761,
'date': '2025-01-31',
'marketcap': '1287507439570',
'stock_volume': 43706863,
'iv30d_1w': '0.619570354162097',
'high': '419.98',
'avg_30_day_call_volume': '1395347.833333333333',
'next_earnings_date': '2025-04-22',
'call_volume': 1099379,
'avg_3_day_put_volume': '724170.666666666667',
'gex_net_change': '245964.1694',
'volatility': '0.766025536680471',
'close': '417.9',
'avg_7_day_put_volume': '692206.428571428571',
'avg_30_day_call_oi': '3897124.133333333333',
'avg_3_day_call_volume': '950225.000000000000',
'variance_risk_premium': '0.156828277880789949675123545478452918905833983656214587',
'prev_call_volume': 1668565,
'next_dividend_date': None,
'low': '401.425',
'avg30_volume': '65061415.263157894737',
'full_name': 'TESLA INC',
'avg_30_day_put_volume': '948770.233333333333',
'net_call_premium': '103844719.0000',
'call_open_interest': 3541952,
'bearish_premium': '619841697.0000',
'put_volume': 807490,
'iv30d': '0.531942662915937',
'week_52_low': '138.8025',
'implied_move': '4.56033575343753',
'avg_7_day_call_volume': '958170.142857142857',
'prev_close': '400.28'},
Phew!
Let's concentrate on the net_call_premium
and
net_put_premium
fields, since this is the driver
for the chart in question. I am going to use a polars DataFrame
to make bulk operations on this data easier:
# Index products are not single-name stocks, so they get filtered out.
# NOTE: RUT/RUTW are included here for consistency with the negative-side
# filter later in the script — the original list omitted them, which let
# the RUT index slip into the positive results.
excluded_tickers = ['SPX', 'SPXW', 'NDX', 'NDXP', 'VIX', 'VIXW', 'RUT', 'RUTW']
raw_positive_df = pl.DataFrame(positive_stock_screener_rsp.json()['data'])
clean_positive_df = (
    raw_positive_df
    # Only the ticker and the two net premium fields drive the chart.
    .select(['ticker', 'net_call_premium', 'net_put_premium'])
    .with_columns(
        # The API returns premiums as strings; cast for arithmetic later.
        pl.col('net_call_premium').cast(pl.Float64),
        pl.col('net_put_premium').cast(pl.Float64)
    )
    .filter(
        ~pl.col('ticker').is_in(excluded_tickers) # remove indexes
    )
)
clean_positive_df
ticker | net_call_premium | net_put_premium |
---|---|---|
str | f64 | f64 |
"AMD" | 8.801619e6 | -1.604592e6 |
"ASML" | 9.528523e6 | -114466.0 |
"MSFT" | 6.015774e6 | -2.438648e6 |
"RUT" | 2.513981e6 | -4.337194e6 |
"SQQQ" | 815688.0 | -4.848594e6 |
… | … | … |
"GEV" | 785940.0 | -533015.0 |
"MPC" | 1.287107e6 | -16696.0 |
"ANET" | 877860.0 | -395132.0 |
"IGV" | 88467.0 | -1.16075e6 |
"PINS" | 1.477506e6 | 245207.0 |
OK great, you are just going to have to take my word for it unfortunately but I can confirm that the top 5 match what is currently shown on the site (Monday May 5th, 2025, at about 2:00PM Chicago time) so let's move on.
With the net positive tickers handled our next task is to collect the net negative tickers, and again we will lean on the screener/Stock Screener endpoint:
https://api.unusualwhales.com/docs#/operations/PublicApi.ScreenerController.stock_screener
but this time we will get the net_premium
results
in ascending order instead of descending order:
# Same screener endpoint, but ascending order surfaces the most negative
# net premiums first.
# (Was an f-string with no placeholders — a plain literal is correct here.)
stock_screener_url = 'https://api.unusualwhales.com/api/screener/stocks'
negative_stock_screener_params = {
    'order': 'net_premium',
    'order_direction': 'asc'
}
negative_stock_screener_rsp = httpx.get(
    stock_screener_url,
    headers=headers,
    params=negative_stock_screener_params
)
# Echo the HTTP status code (200 expected).
negative_stock_screener_rsp.status_code
200
200 (success) again! Let's apply the same treatment as above to the new, negative results:
# Index products (plus RUT/RUTW this time) are not single-name stocks; drop them.
excluded_tickers = ['SPX', 'SPXW', 'NDX', 'NDXP', 'VIX', 'VIXW', 'RUT', 'RUTW']
raw_negative_df = pl.DataFrame(negative_stock_screener_rsp.json()['data'])
# The two premium fields arrive as strings from the API; cast both to floats.
premium_cols = ['net_call_premium', 'net_put_premium']
clean_negative_df = (
    raw_negative_df
    .select(['ticker', *premium_cols])
    .with_columns([pl.col(name).cast(pl.Float64) for name in premium_cols])
    .filter(~pl.col('ticker').is_in(excluded_tickers))  # remove indexes
)
clean_negative_df
ticker | net_call_premium | net_put_premium |
---|---|---|
str | f64 | f64 |
"TSLA" | -2.1326755e7 | 1.638485e7 |
"MSTR" | -1.4347287e7 | 7.215794e6 |
"AAPL" | -1.2992526e7 | 6.631852e6 |
"BRKB" | -9.20836e6 | 4.486791e6 |
"PLTR" | -7.678367e6 | 3.429367e6 |
… | … | … |
"TGTX" | -1.206063e6 | -178251.0 |
"NKE" | -621308.0 | 352309.0 |
"MSTU" | -1.029072e6 | -80660.0 |
"EQIX" | -912622.0 | 28614.0 |
"SPXL" | -743553.0 | 156157.0 |
Again, you will just have to take my word for it, but the order of these tickers (TSLA, MSTR, AAPL, BRKB, PLTR, etc.) matches the site exactly.
Let's take the top 10 tickers from each DataFrame, combine them
into a single DataFrame (for easier plotting), add a
positive
or negative
value into a
"side" column (also for easier plotting), and calculate a
net_premium
field so we have one final descriptive
value for each ticker:
def _take_top_10(df, side_label):
    """Return the first 10 rows of *df* tagged with a constant 'side' column."""
    return df.head(10).with_columns(pl.lit(side_label).alias('side'))

top_10_clean_pos_df = _take_top_10(clean_positive_df, 'positive')
top_10_clean_neg_df = _take_top_10(clean_negative_df, 'negative')

# Stack both sides, collapse the two premium fields into one descriptive
# net_premium value per ticker (calls minus puts), and sort so the most
# positive impact comes first.
raw_final_df = pl.concat([top_10_clean_pos_df, top_10_clean_neg_df])
plot_ready_df = (
    raw_final_df
    .with_columns(
        (pl.col('net_call_premium') - pl.col('net_put_premium')).alias('net_premium')
    )
    .select(['ticker', 'net_premium', 'side'])
    .sort('net_premium', descending=True)
)
plot_ready_df
ticker | net_premium | side |
---|---|---|
str | f64 | str |
"AMD" | 1.0406211e7 | "positive" |
"ASML" | 9.642989e6 | "positive" |
"MSFT" | 8.454422e6 | "positive" |
"RUT" | 6.851175e6 | "positive" |
"SQQQ" | 5.664282e6 | "positive" |
… | … | … |
"PLTR" | -1.1107734e7 | "negative" |
"BRKB" | -1.3695151e7 | "negative" |
"AAPL" | -1.9624378e7 | "negative" |
"MSTR" | -2.1563081e7 | "negative" |
"TSLA" | -3.7711605e7 | "negative" |
At last, we are ready to plot the results (with some extra styling):
# Hex palette approximating the Unusual Whales site's dark color scheme.
UW_DARK_THEME = {
    'red': '#dc3545',          # negative-side bars
    'yellow': '#ffc107',       # not referenced below; kept for completeness
    'teal': '#20c997',         # positive-side bars
    'black': '#161c2d',        # plot/panel background fill
    'gray_medium': '#748196',  # line elements (axes, etc.)
    'gray_light': '#f9fbfd',   # text, tooltips, legend border
}
def uw_dark_theme(colors: dict, show_legend: bool = True) -> theme:
    """Build a dark lets-plot theme from the UW color palette.

    Args:
        colors: palette dict providing 'black', 'gray_medium', and
            'gray_light' entries (hex color strings).
        show_legend: when True the legend is drawn at the bottom of the
            plot; otherwise it is hidden entirely.

    Returns:
        A lets-plot theme object ready to be added to a ggplot.
    """
    # Resolve the legend placement up front so there is a single return path.
    legend_position = 'bottom' if show_legend else 'none'
    base = theme_none() + theme(
        plot_background=element_rect(fill=colors['black']),
        panel_background=element_rect(fill=colors['black']),
        panel_grid_major=element_blank(),
        panel_grid_minor=element_blank(),
        axis_ontop=True,
        axis_ticks=element_blank(),
        axis_tooltip=element_rect(color=colors['gray_light']),
        tooltip=element_rect(color=colors['gray_light'], fill=colors['black']),
        line=element_line(color=colors['gray_medium'], size=1),
        rect=element_rect(color=colors['black'], fill=colors['black'], size=2),
        text=element_text(color=colors['gray_light'], size=10),
        legend_background=element_rect(color=colors['gray_light'], fill=colors['black'], size=2),
        plot_title=element_text(hjust=0.5, size=16, color=colors['gray_light']),
    )
    return base + theme(legend_position=legend_position)
# Map each 'side' category onto the palette: teal for net-positive tickers,
# red for net-negative ones.
color_mapping = {
    'positive': UW_DARK_THEME['teal'],
    'negative': UW_DARK_THEME['red'],
}
# Sort ascending so the largest positive net premium lands at the top of the
# horizontal bar chart.
sorted_plot_df = plot_ready_df.sort('net_premium', descending=False)
top_net_impact_plot = (
    ggplot(sorted_plot_df, aes(x='net_premium', y='ticker'))
    + geom_bar(aes(fill='side'), stat='identity', size=0.5)
    + scale_fill_manual(values=color_mapping)
    + ggtitle('Top Net Impact Replica')
    + uw_dark_theme(UW_DARK_THEME, show_legend=False)
)
top_net_impact_plot.show()