Skip to content

feat: add gemini-2.0-flash-001 and gemini-2.0-flash-lite-001 to fine tune score endpoints and multimodal endpoints #1650

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 9 commits into from
Apr 29, 2025
8 changes: 6 additions & 2 deletions bigframes/ml/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,13 +72,16 @@
# Gemini endpoints that support fine-tuning evaluation via score().
# NOTE(review): the individual _GEMINI_*_ENDPOINT constants are defined
# earlier in this module (outside this diff hunk) — presumably plain
# model-name strings such as "gemini-1.5-pro-002"; verify against the file.
_GEMINI_FINE_TUNE_SCORE_ENDPOINTS = (
_GEMINI_1P5_PRO_002_ENDPOINT,
_GEMINI_1P5_FLASH_002_ENDPOINT,
_GEMINI_2_FLASH_001_ENDPOINT,
_GEMINI_2_FLASH_LITE_001_ENDPOINT,
)
# Gemini endpoints that accept multimodal (text + image/video/etc.) input.
# gemini-2.0-flash-lite-001 is intentionally absent here: this PR adds it
# only to the fine-tune/score list above, not to the multimodal list.
_GEMINI_MULTIMODAL_ENDPOINTS = (
_GEMINI_1P5_PRO_001_ENDPOINT,
_GEMINI_1P5_PRO_002_ENDPOINT,
_GEMINI_1P5_FLASH_001_ENDPOINT,
_GEMINI_1P5_FLASH_002_ENDPOINT,
_GEMINI_2_FLASH_EXP_ENDPOINT,
_GEMINI_2_FLASH_001_ENDPOINT,
)

_CLAUDE_3_SONNET_ENDPOINT = "claude-3-sonnet"
Expand Down Expand Up @@ -712,7 +715,8 @@ def score(
] = "text_generation",
) -> bigframes.dataframe.DataFrame:
"""Calculate evaluation metrics of the model. Only support
"gemini-1.5-pro-002", and "gemini-1.5-flash-002".
"gemini-1.5-pro-002", "gemini-1.5-flash-002",
"gemini-2.0-flash-lite-001", and "gemini-2.0-flash-001".

.. note::

Expand Down Expand Up @@ -746,7 +750,7 @@ def score(

if self.model_name not in _GEMINI_FINE_TUNE_SCORE_ENDPOINTS:
raise NotImplementedError(
"score() only supports gemini-1.5-pro-002, and gemini-1.5-flash-2 model."
"score() only supports gemini-1.5-pro-002, gemini-1.5-flash-002, gemini-2.0-flash-001, and gemini-2.0-flash-lite-001 model."
)

X, y = utils.batch_convert_to_dataframe(X, y, session=self._bqml_model.session)
Expand Down
10 changes: 10 additions & 0 deletions tests/system/small/ml/test_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,8 @@ def test_create_load_gemini_text_generator_model(
"gemini-1.5-flash-001",
"gemini-1.5-flash-002",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
),
)
@pytest.mark.flaky(retries=2)
Expand All @@ -177,6 +179,8 @@ def test_gemini_text_generator_predict_default_params_success(
"gemini-1.5-flash-001",
"gemini-1.5-flash-002",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
),
)
@pytest.mark.flaky(retries=2)
Expand Down Expand Up @@ -204,6 +208,8 @@ def test_gemini_text_generator_predict_with_params_success(
"gemini-1.5-flash-001",
"gemini-1.5-flash-002",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
),
)
@pytest.mark.flaky(retries=2)
Expand Down Expand Up @@ -764,6 +770,8 @@ def test_text_embedding_generator_retry_no_progress(session, bq_connection):
(
"gemini-1.5-pro-002",
"gemini-1.5-flash-002",
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
),
)
def test_llm_gemini_score(llm_fine_tune_df_default_index, model_name):
Expand Down Expand Up @@ -792,6 +800,8 @@ def test_llm_gemini_score(llm_fine_tune_df_default_index, model_name):
(
"gemini-1.5-pro-002",
"gemini-1.5-flash-002",
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-001",
),
)
def test_llm_gemini_pro_score_params(llm_fine_tune_df_default_index, model_name):
Expand Down
1 change: 1 addition & 0 deletions tests/system/small/ml/test_multimodal_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ def test_multimodal_embedding_generator_predict_default_params_success(
"gemini-1.5-flash-001",
"gemini-1.5-flash-002",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-001",
),
)
@pytest.mark.flaky(retries=2)
Expand Down