import datetime
import os
import pprint
from time import sleep
import altair as alt
import duckdb
import geopandas as gpd
import gspread
import matplotlib.pyplot as plt
import pandas as pd
import pyarrow.compute as pc
import xarray as xr
from google.auth import default
from google.auth.transport.requests import Request
from google.cloud import bigquery
from google.colab import auth, drive, runtime, sheets, userdata
from google.oauth2 import service_account
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
# Load secrets stored in Colab's Secrets panel into environment variables.
env_vars = ["GEMINI_API_KEY", "GOOGLE_API_KEY", "HF_TOKEN", "VERTEXAI_API_KEY", "YOUTUBE_DATA_API_KEY"]
for env_var in env_vars:
    os.environ[env_var] = userdata.get(env_var)
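# With the secrets exported, downstream clients can pick them up directly. A
# minimal sketch (assumes the huggingface_hub package is present in the runtime):
from huggingface_hub import login

login(token=os.environ["HF_TOKEN"])  # authenticate this session to the Hugging Face Hub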
# Mount Google Drive and authenticate the Colab user for Google APIs.
drive.mount("/content/drive")
auth.authenticate_user()
creds, _ = default()
gc = gspread.authorize(creds)
# Build service clients for the Drive, Docs, Sheets, and Slides APIs.
drive_service = build("drive", "v3", credentials=creds)
docs_service = build("docs", "v1", credentials=creds)
sheets_service = build("sheets", "v4", credentials=creds)
slides_service = build("slides", "v1", credentials=creds)
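# Quick sanity checks for the authorized clients (a sketch; the spreadsheet
# key is a placeholder):
results = drive_service.files().list(pageSize=5, fields="files(id, name)").execute()
pprint.pprint(results.get("files", []))
# sh = gc.open_by_key("YOUR_SPREADSHEET_ID")
# print(sh.sheet1.title)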
# Install the google-genai SDK and create a Gemini client.
!uv pip install -q google-genai
from google import genai
from google.genai import types
google_genai_client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
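# A minimal Gemini call with the new SDK (a sketch; the model name is an
# assumption — use whichever model your key can access):
response = google_genai_client.models.generate_content(
    model="gemini-2.0-flash",
    contents="Say hello in one short sentence.",
)
print(response.text)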
def write_sheet(sheet_id: str, df: pd.DataFrame, sheet_name: str = "Sheet1") -> None:
    # 1. Fetch the spreadsheet metadata to check whether the tab already exists.
    spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=sheet_id).execute()
    sheet_idx = None
    sheet_exists = False
    for sheet in spreadsheet.get("sheets", []):
        if sheet["properties"]["title"] == sheet_name:
            sheet_idx = sheet["properties"]["sheetId"]
            sheet_exists = True
            break
    # 2. Create the tab if it is missing; otherwise clear its existing values.
    if not sheet_exists:
        req_body = {"requests": [{"addSheet": {"properties": {"title": sheet_name}}}]}
        response = sheets_service.spreadsheets().batchUpdate(
            spreadsheetId=sheet_id, body=req_body
        ).execute()
        # Capture the new sheetId from the creation response (needed for the freeze step).
        sheet_idx = response["replies"][0]["addSheet"]["properties"]["sheetId"]
    else:
        sheets_service.spreadsheets().values().clear(
            spreadsheetId=sheet_id, range=sheet_name
        ).execute()
    # 3. Write the data. JSON cannot represent NaN, so blank those cells out first.
    df = df.fillna("")
    body = {"values": [df.columns.values.tolist()] + df.values.tolist()}
    sheets_service.spreadsheets().values().update(
        spreadsheetId=sheet_id,
        range=sheet_name,
        valueInputOption="USER_ENTERED",
        body=body,
    ).execute()
    # 4. Freeze the header row. sheet_idx is set by now, whether fetched or created.
    if sheet_idx is not None:
        requests = [
            {
                "updateSheetProperties": {
                    "properties": {
                        "sheetId": sheet_idx,
                        "gridProperties": {"frozenRowCount": 1},
                    },
                    "fields": "gridProperties.frozenRowCount",
                }
            }
        ]
        sheets_service.spreadsheets().batchUpdate(
            spreadsheetId=sheet_id, body={"requests": requests}
        ).execute()
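# Example usage of write_sheet (a sketch; the spreadsheet ID is a placeholder):
demo_df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
write_sheet("YOUR_SPREADSHEET_ID", demo_df, sheet_name="demo")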
# Retrieve a single secret from the Colab Secrets panel.
from google.colab import userdata
API_KEY = userdata.get("API_KEY")
# Kaggle API setup: place kaggle.json (downloaded from your Kaggle account settings) in ~/.kaggle.
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
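# With the credentials in place, the Kaggle CLI works, e.g. (the dataset slug
# is a placeholder):
!kaggle datasets download -d OWNER/DATASET -p /content --unzip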
# Download a file from the Colab VM to the local machine.
from google.colab import files
files.download("best_model_HDF5_format.h5")
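# The reverse direction — upload files from the local machine into the Colab VM:
uploaded = files.upload()  # opens a browser file picker; returns {filename: bytes}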
Click the RAM/Disk indicator at the top right to change the runtime type (e.g., switch to a GPU).
Streamlit
# Write the Streamlit app code in a cell, then launch it:
!streamlit run /usr/local/lib/python3.11/dist-packages/colab_kernel_launcher.py
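# To reach the app from a browser, one option (an assumption, not part of the
# original recipe) is tunneling Streamlit's default port 8501, e.g. with localtunnel:
!npx localtunnel --port 8501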
# Quiet install/upgrade of a package (replace PACKAGE with the package name).
!pip install -q -U PACKAGE
!pip install -q condacolab
import condacolab
condacolab.install()  # note: this restarts the Python kernel; rerun setup cells afterwards
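# After the restart, verify the conda setup and install packages (a sketch;
# the package name is a placeholder):
import condacolab
condacolab.check()
!conda install -q -y -c conda-forge SOME_PACKAGE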