"""SQL-style merge routines"""from__future__importannotationsfromcollections.abcimport(Hashable,Sequence,)importdatetimefromfunctoolsimportpartialfromtypingimport(TYPE_CHECKING,Literal,cast,final,)importuuidimportwarningsimportnumpyasnpfrompandas._libsimport(Timedelta,hashtableaslibhashtable,joinaslibjoin,lib,)frompandas._libs.libimportis_range_indexerfrompandas._typingimport(AnyArrayLike,ArrayLike,IndexLabel,JoinHow,MergeHow,Shape,Suffixes,npt,)frompandas.errorsimportMergeErrorfrompandas.util._decoratorsimportcache_readonlyfrompandas.util._exceptionsimportfind_stack_levelfrompandas.core.dtypes.baseimportExtensionDtypefrompandas.core.dtypes.castimportfind_common_typefrompandas.core.dtypes.commonimport(ensure_int64,ensure_object,is_bool,is_bool_dtype,is_float_dtype,is_integer,is_integer_dtype,is_list_like,is_number,is_numeric_dtype,is_object_dtype,is_string_dtype,needs_i8_conversion,)frompandas.core.dtypes.dtypesimport(CategoricalDtype,DatetimeTZDtype,)frompandas.core.dtypes.genericimport(ABCDataFrame,ABCSeries,)frompandas.core.dtypes.missingimport(isna,na_value_for_dtype,)frompandasimport(ArrowDtype,Categorical,Index,MultiIndex,Series,)importpandas.core.algorithmsasalgosfrompandas.core.arraysimport(ArrowExtensionArray,BaseMaskedArray,ExtensionArray,)frompandas.core.arrays.string_importStringDtypeimportpandas.core.commonascomfrompandas.core.constructionimport(ensure_wrapped_if_datetimelike,extract_array,)frompandas.core.indexes.apiimportdefault_indexfrompandas.core.sortingimport(get_group_index,is_int64_overflow_possible,)ifTYPE_CHECKING:frompandasimportDataFramefrompandas.coreimportgroupbyfrompandas.core.arraysimportDatetimeArrayfrompandas.core.indexes.frozenimportFrozenList_factorizers={np.int64:libhashtable.Int64Factorizer,np.longlong:libhashtable.Int64Factorizer,np.int32:libhashtable.Int32Factorizer,np.int16:libhashtable.Int16Factorizer,np.int8:libhashtable.Int8Factorizer,np.uint64:libhashtable.UInt64Factorizer,np.uint32:libhashtable.UInt32Factorizer,np.uint16:libhashtable.UInt16Factorizer,np.uint8:libhashtable.UInt8Factorizer,np.bool_:libhashtable.UInt8Factorizer,np.float64:libhashtable.Float64Factorizer,np.float32:libhashtable.Float32Factorizer,np.complex64:libhashtable.Complex64Factorizer,np.complex128:libhashtable.Complex128Factorizer,np.object_:libhashtable.ObjectFactorizer,}# See https://github.com/pandas-dev/pandas/issues/52451ifnp.intcisnotnp.int32:_factorizers[np.intc]=libhashtable.Int64Factorizer_known=(np.ndarray,ExtensionArray,Index,ABCSeries)
def merge(
    left: DataFrame | Series,
    right: DataFrame | Series,
    how: MergeHow = "inner",
    on: IndexLabel | AnyArrayLike | None = None,
    left_on: IndexLabel | AnyArrayLike | None = None,
    right_on: IndexLabel | AnyArrayLike | None = None,
    left_index: bool = False,
    right_index: bool = False,
    sort: bool = False,
    suffixes: Suffixes = ("_x", "_y"),
    copy: bool | lib.NoDefault = lib.no_default,
    indicator: str | bool = False,
    validate: str | None = None,
) -> DataFrame:
    """
    Merge DataFrame or named Series objects with a database-style join.

    A named Series object is treated as a DataFrame with a single named column.

    The join is done on columns or indexes. If joining columns on columns, the
    DataFrame indexes *will be ignored*. Otherwise if joining indexes on
    indexes or indexes on a column or columns, the index will be passed on.
    When performing a cross merge, no column specifications to merge on are
    allowed.

    .. warning::

        If both key columns contain rows where the key is a null value, those
        rows will be matched against each other. This is different from usual
        SQL join behaviour and can lead to unexpected results.

    Parameters
    ----------
    left : DataFrame or named Series
        First pandas object to merge.
    right : DataFrame or named Series
        Second pandas object to merge.
    how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
        Type of merge to be performed.

        * left: use only keys from left frame, similar to a SQL left outer join;
          preserve key order.
        * right: use only keys from right frame, similar to a SQL right outer join;
          preserve key order.
        * outer: use union of keys from both frames, similar to a SQL full outer
          join; sort keys lexicographically.
        * inner: use intersection of keys from both frames, similar to a SQL inner
          join; preserve the order of the left keys.
        * cross: creates the cartesian product from both frames, preserves the
          order of the left keys.
    on : label or list
        Column or index level names to join on. These must be found in both
        DataFrames. If `on` is None and not merging on indexes then this
        defaults to the intersection of the columns in both DataFrames.
    left_on : label or list, or array-like
        Column or index level names to join on in the left DataFrame. Can also
        be an array or list of arrays of the length of the left DataFrame.
        These arrays are treated as if they are columns.
    right_on : label or list, or array-like
        Column or index level names to join on in the right DataFrame. Can also
        be an array or list of arrays of the length of the right DataFrame.
        These arrays are treated as if they are columns.
    left_index : bool, default False
        Use the index from the left DataFrame as the join key(s). If it is a
        MultiIndex, the number of keys in the other DataFrame (either the index
        or a number of columns) must match the number of levels.
    right_index : bool, default False
        Use the index from the right DataFrame as the join key. Same caveats as
        left_index.
    sort : bool, default False
        Sort the join keys lexicographically in the result DataFrame. If False,
        the order of the join keys depends on the join type (how keyword).
    suffixes : list-like, default is ("_x", "_y")
        A length-2 sequence where each element is optionally a string
        indicating the suffix to add to overlapping column names in
        `left` and `right` respectively. Pass a value of `None` instead
        of a string to indicate that the column name from `left` or
        `right` should be left as-is, with no suffix. At least one of the
        values must not be None.
    copy : bool, default False
        If False, avoid copy if possible.

        .. note::
            The `copy` keyword will change behavior in pandas 3.0.
            `Copy-on-Write
            <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
            will be enabled by default, which means that all methods with a
            `copy` keyword will use a lazy copy mechanism to defer the copy
            and ignore the `copy` keyword. The `copy` keyword will be removed
            in a future version of pandas.

            You can already get the future behavior and improvements through
            enabling copy on write ``pd.options.mode.copy_on_write = True``

        .. deprecated:: 3.0.0
    indicator : bool or str, default False
        If True, adds a column to the output DataFrame called "_merge" with
        information on the source of each row. The column can be given a
        different name by providing a string argument. The column will have a
        Categorical type with the value of "left_only" for observations whose
        merge key only appears in the left DataFrame, "right_only" for
        observations whose merge key only appears in the right DataFrame, and
        "both" if the observation's merge key is found in both DataFrames.
    validate : str, optional
        If specified, checks if merge is of specified type.

        * "one_to_one" or "1:1": check if merge keys are unique in both
          left and right datasets.
        * "one_to_many" or "1:m": check if merge keys are unique in left
          dataset.
        * "many_to_one" or "m:1": check if merge keys are unique in right
          dataset.
        * "many_to_many" or "m:m": allowed, but does not result in checks.

    Returns
    -------
    DataFrame
        A DataFrame of the two merged objects.

    See Also
    --------
    merge_ordered : Merge with optional filling/interpolation.
    merge_asof : Merge on nearest keys.
    DataFrame.join : Similar method using indices.

    Examples
    --------
    >>> df1 = pd.DataFrame(
    ...     {"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]}
    ... )
    >>> df2 = pd.DataFrame(
    ...     {"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]}
    ... )
    >>> df1
      lkey  value
    0  foo      1
    1  bar      2
    2  baz      3
    3  foo      5
    >>> df2
      rkey  value
    0  foo      5
    1  bar      6
    2  baz      7
    3  foo      8

    Merge df1 and df2 on the lkey and rkey columns. The value columns have
    the default suffixes, _x and _y, appended.

    >>> df1.merge(df2, left_on="lkey", right_on="rkey")
      lkey  value_x rkey  value_y
    0  foo        1  foo        5
    1  foo        1  foo        8
    2  bar        2  bar        6
    3  baz        3  baz        7
    4  foo        5  foo        5
    5  foo        5  foo        8

    Merge DataFrames df1 and df2 with specified left and right suffixes
    appended to any overlapping columns.

    >>> df1.merge(df2, left_on="lkey", right_on="rkey", suffixes=("_left", "_right"))
      lkey  value_left rkey  value_right
    0  foo           1  foo            5
    1  foo           1  foo            8
    2  bar           2  bar            6
    3  baz           3  baz            7
    4  foo           5  foo            5
    5  foo           5  foo            8

    Merge DataFrames df1 and df2, but raise an exception if the DataFrames
    have any overlapping columns.

    >>> df1.merge(df2, left_on="lkey", right_on="rkey", suffixes=(False, False))
    Traceback (most recent call last):
    ...
    ValueError: columns overlap but no suffix specified:
        Index(['value'], dtype='object')

    >>> df1 = pd.DataFrame({"a": ["foo", "bar"], "b": [1, 2]})
    >>> df2 = pd.DataFrame({"a": ["foo", "baz"], "c": [3, 4]})
    >>> df1
         a  b
    0  foo  1
    1  bar  2
    >>> df2
         a  c
    0  foo  3
    1  baz  4

    >>> df1.merge(df2, how="inner", on="a")
         a  b  c
    0  foo  1  3

    >>> df1.merge(df2, how="left", on="a")
         a  b    c
    0  foo  1  3.0
    1  bar  2  NaN

    >>> df1 = pd.DataFrame({"left": ["foo", "bar"]})
    >>> df2 = pd.DataFrame({"right": [7, 8]})
    >>> df1
      left
    0  foo
    1  bar
    >>> df2
       right
    0      7
    1      8

    >>> df1.merge(df2, how="cross")
      left  right
    0  foo      7
    1  foo      8
    2  bar      7
    3  bar      8
    """
    left_df = _validate_operand(left)
    left._check_copy_deprecation(copy)
    right_df = _validate_operand(right)
    if how == "cross":
        return _cross_merge(
            left_df,
            right_df,
            on=on,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
            sort=sort,
            suffixes=suffixes,
            indicator=indicator,
            validate=validate,
        )
    else:
        op = _MergeOperation(
            left_df,
            right_df,
            how=how,
            on=on,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
            sort=sort,
            suffixes=suffixes,
            indicator=indicator,
            validate=validate,
        )
        return op.get_result()
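
# Illustrative sketch (not part of pandas): the ``indicator`` and ``validate``
# options documented above, on the same tiny frames as the docstring. "foo"
# appears in both inputs, "bar" only on the left and "baz" only on the right,
# so the _merge column carries both/left_only/right_only.
def _demo_merge_indicator_sketch() -> None:
    import pandas as pd

    df1 = pd.DataFrame({"a": ["foo", "bar"], "b": [1, 2]})
    df2 = pd.DataFrame({"a": ["foo", "baz"], "c": [3, 4]})
    out = pd.merge(df1, df2, on="a", how="outer", indicator=True)
    assert set(out["_merge"]) == {"both", "left_only", "right_only"}
    # validate="one_to_one" passes because "a" is unique on both sides
    pd.merge(df1, df2, on="a", how="outer", validate="one_to_one")
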
def _cross_merge(
    left: DataFrame,
    right: DataFrame,
    on: IndexLabel | AnyArrayLike | None = None,
    left_on: IndexLabel | AnyArrayLike | None = None,
    right_on: IndexLabel | AnyArrayLike | None = None,
    left_index: bool = False,
    right_index: bool = False,
    sort: bool = False,
    suffixes: Suffixes = ("_x", "_y"),
    indicator: str | bool = False,
    validate: str | None = None,
) -> DataFrame:
    """
    See merge.__doc__ with how='cross'
    """

    if (
        left_index
        or right_index
        or right_on is not None
        or left_on is not None
        or on is not None
    ):
        raise MergeError(
            "Can not pass on, right_on, left_on or set right_index=True or "
            "left_index=True"
        )

    cross_col = f"_cross_{uuid.uuid4()}"
    left = left.assign(**{cross_col: 1})
    right = right.assign(**{cross_col: 1})

    left_on = right_on = [cross_col]

    res = merge(
        left,
        right,
        how="inner",
        on=on,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        sort=sort,
        suffixes=suffixes,
        indicator=indicator,
        validate=validate,
    )
    del res[cross_col]
    return res


def _groupby_and_merge(
    by, left: DataFrame | Series, right: DataFrame | Series, merge_pieces
):
    """
    groupby & merge; we are always performing a left-by type operation

    Parameters
    ----------
    by: field to group
    left: DataFrame
    right: DataFrame
    merge_pieces: function for merging
    """
    pieces = []
    if not isinstance(by, (list, tuple)):
        by = [by]

    lby = left.groupby(by, sort=False)
    rby: groupby.DataFrameGroupBy | groupby.SeriesGroupBy | None = None

    # if we can groupby the rhs
    # then we can get vastly better perf
    if all(item in right.columns for item in by):
        rby = right.groupby(by, sort=False)

    for key, lhs in lby._grouper.get_iterator(lby._selected_obj):
        if rby is None:
            rhs = right
        else:
            try:
                rhs = right.take(rby.indices[key])
            except KeyError:
                # key doesn't exist in right
                lcols = lhs.columns.tolist()
                cols = lcols + [r for r in right.columns if r not in set(lcols)]
                merged = lhs.reindex(columns=cols)
                merged.index = range(len(merged))
                pieces.append(merged)
                continue

        merged = merge_pieces(lhs, rhs)

        # make sure join keys are in the merged
        # TODO, should merge_pieces do this?
        merged[by] = key

        pieces.append(merged)

    # preserve the original order
    # if we have a missing piece this can be reset
    from pandas.core.reshape.concat import concat

    result = concat(pieces, ignore_index=True)
    result = result.reindex(columns=pieces[0].columns)
    return result, lby
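
# Illustrative sketch (not part of pandas): the cartesian-product trick used
# by _cross_merge above. Both frames get the same constant key column, an
# inner merge on that key matches every left row with every right row, and
# the helper column is dropped afterwards. ``key`` is a hypothetical name;
# the real code uses a uuid-based column name to avoid collisions.
def _demo_cross_trick_sketch() -> None:
    import pandas as pd

    left = pd.DataFrame({"left": ["foo", "bar"]})
    right = pd.DataFrame({"right": [7, 8]})
    key = "_tmp_cross_key"
    out = pd.merge(left.assign(**{key: 1}), right.assign(**{key: 1}), on=key)
    del out[key]
    assert len(out) == len(left) * len(right)
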
def merge_ordered(
    left: DataFrame | Series,
    right: DataFrame | Series,
    on: IndexLabel | None = None,
    left_on: IndexLabel | None = None,
    right_on: IndexLabel | None = None,
    left_by=None,
    right_by=None,
    fill_method: str | None = None,
    suffixes: Suffixes = ("_x", "_y"),
    how: JoinHow = "outer",
) -> DataFrame:
    """
    Perform a merge for ordered data with optional filling/interpolation.

    Designed for ordered data like time series data. Optionally
    perform group-wise merge (see examples).

    Parameters
    ----------
    left : DataFrame or named Series
        First pandas object to merge.
    right : DataFrame or named Series
        Second pandas object to merge.
    on : label or list
        Field names to join on. Must be found in both DataFrames.
    left_on : label or list, or array-like
        Field names to join on in left DataFrame. Can be a vector or list of
        vectors of the length of the DataFrame to use a particular vector as
        the join key instead of columns.
    right_on : label or list, or array-like
        Field names to join on in right DataFrame or vector/list of vectors
        per left_on docs.
    left_by : column name or list of column names
        Group left DataFrame by group columns and merge piece by piece with
        right DataFrame. Must be None if either left or right are a Series.
    right_by : column name or list of column names
        Group right DataFrame by group columns and merge piece by piece with
        left DataFrame. Must be None if either left or right are a Series.
    fill_method : {'ffill', None}, default None
        Interpolation method for data.
    suffixes : list-like, default is ("_x", "_y")
        A length-2 sequence where each element is optionally a string
        indicating the suffix to add to overlapping column names in
        `left` and `right` respectively. Pass a value of `None` instead
        of a string to indicate that the column name from `left` or
        `right` should be left as-is, with no suffix. At least one of the
        values must not be None.
    how : {'left', 'right', 'outer', 'inner'}, default 'outer'
        * left: use only keys from left frame (SQL: left outer join)
        * right: use only keys from right frame (SQL: right outer join)
        * outer: use union of keys from both frames (SQL: full outer join)
        * inner: use intersection of keys from both frames (SQL: inner join).

    Returns
    -------
    DataFrame
        The merged DataFrame output type will be the same as
        'left', if it is a subclass of DataFrame.

    See Also
    --------
    merge : Merge with a database-style join.
    merge_asof : Merge on nearest keys.

    Examples
    --------
    >>> from pandas import merge_ordered
    >>> df1 = pd.DataFrame(
    ...     {
    ...         "key": ["a", "c", "e", "a", "c", "e"],
    ...         "lvalue": [1, 2, 3, 1, 2, 3],
    ...         "group": ["a", "a", "a", "b", "b", "b"],
    ...     }
    ... )
    >>> df1
      key  lvalue group
    0   a       1     a
    1   c       2     a
    2   e       3     a
    3   a       1     b
    4   c       2     b
    5   e       3     b

    >>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
    >>> df2
      key  rvalue
    0   b       1
    1   c       2
    2   d       3

    >>> merge_ordered(df1, df2, fill_method="ffill", left_by="group")
      key  lvalue group  rvalue
    0   a       1     a     NaN
    1   b       1     a     1.0
    2   c       2     a     2.0
    3   d       2     a     3.0
    4   e       3     a     3.0
    5   a       1     b     NaN
    6   b       1     b     1.0
    7   c       2     b     2.0
    8   d       2     b     3.0
    9   e       3     b     3.0
    """

    def _merger(x, y) -> DataFrame:
        # perform the ordered merge operation
        op = _OrderedMerge(
            x,
            y,
            on=on,
            left_on=left_on,
            right_on=right_on,
            suffixes=suffixes,
            fill_method=fill_method,
            how=how,
        )
        return op.get_result()

    if left_by is not None and right_by is not None:
        raise ValueError("Can only group either left or right frames")
    if left_by is not None:
        if isinstance(left_by, str):
            left_by = [left_by]
        check = set(left_by).difference(left.columns)
        if len(check) != 0:
            raise KeyError(f"{check} not found in left columns")
        result, _ = _groupby_and_merge(left_by, left, right, lambda x, y: _merger(x, y))
    elif right_by is not None:
        if isinstance(right_by, str):
            right_by = [right_by]
        check = set(right_by).difference(right.columns)
        if len(check) != 0:
            raise KeyError(f"{check} not found in right columns")
        result, _ = _groupby_and_merge(
            right_by, right, left, lambda x, y: _merger(y, x)
        )
    else:
        result = _merger(left, right)
    return result
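
# Illustrative sketch (not part of pandas): merge_ordered with ffill as in
# the docstring above, but without grouping. The keys form the ordered union
# a..e; "b" has no left row, so its lvalue is forward-filled from "a".
def _demo_merge_ordered_sketch() -> None:
    import pandas as pd

    df1 = pd.DataFrame({"key": ["a", "c", "e"], "lvalue": [1, 2, 3]})
    df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
    out = pd.merge_ordered(df1, df2, on="key", fill_method="ffill")
    assert list(out["key"]) == ["a", "b", "c", "d", "e"]
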
def merge_asof(
    left: DataFrame | Series,
    right: DataFrame | Series,
    on: IndexLabel | None = None,
    left_on: IndexLabel | None = None,
    right_on: IndexLabel | None = None,
    left_index: bool = False,
    right_index: bool = False,
    by=None,
    left_by=None,
    right_by=None,
    suffixes: Suffixes = ("_x", "_y"),
    tolerance: int | datetime.timedelta | None = None,
    allow_exact_matches: bool = True,
    direction: str = "backward",
) -> DataFrame:
    """
    Perform a merge by key distance.

    This is similar to a left-join except that we match on nearest
    key rather than equal keys. Both DataFrames must be sorted by the key.

    For each row in the left DataFrame:

      - A "backward" search selects the last row in the right DataFrame whose
        'on' key is less than or equal to the left's key.

      - A "forward" search selects the first row in the right DataFrame whose
        'on' key is greater than or equal to the left's key.

      - A "nearest" search selects the row in the right DataFrame whose 'on'
        key is closest in absolute distance to the left's key.

    Optionally match on equivalent keys with 'by' before searching with 'on'.

    Parameters
    ----------
    left : DataFrame or named Series
        First pandas object to merge.
    right : DataFrame or named Series
        Second pandas object to merge.
    on : label
        Field name to join on. Must be found in both DataFrames.
        The data MUST be ordered. Furthermore this must be a numeric column,
        such as datetimelike, integer, or float. On or left_on/right_on
        must be given.
    left_on : label
        Field name to join on in left DataFrame.
    right_on : label
        Field name to join on in right DataFrame.
    left_index : bool
        Use the index of the left DataFrame as the join key.
    right_index : bool
        Use the index of the right DataFrame as the join key.
    by : column name or list of column names
        Match on these columns before performing merge operation.
    left_by : column name
        Field names to match on in the left DataFrame.
    right_by : column name
        Field names to match on in the right DataFrame.
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively.
    tolerance : int or timedelta, optional, default None
        Select asof tolerance within this range; must be compatible
        with the merge index.
    allow_exact_matches : bool, default True

        - If True, allow matching with the same 'on' value
          (i.e. less-than-or-equal-to / greater-than-or-equal-to)
        - If False, don't match the same 'on' value
          (i.e., strictly less-than / strictly greater-than).

    direction : 'backward' (default), 'forward', or 'nearest'
        Whether to search for prior, subsequent, or closest matches.

    Returns
    -------
    DataFrame
        A DataFrame of the two merged objects.

    See Also
    --------
    merge : Merge with a database-style join.
    merge_ordered : Merge with optional filling/interpolation.

    Examples
    --------
    >>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
    >>> left
        a left_val
    0   1        a
    1   5        b
    2  10        c

    >>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
    >>> right
       a  right_val
    0  1          1
    1  2          2
    2  3          3
    3  6          6
    4  7          7

    >>> pd.merge_asof(left, right, on="a")
        a left_val  right_val
    0   1        a          1
    1   5        b          3
    2  10        c          7

    >>> pd.merge_asof(left, right, on="a", allow_exact_matches=False)
        a left_val  right_val
    0   1        a        NaN
    1   5        b        3.0
    2  10        c        7.0

    >>> pd.merge_asof(left, right, on="a", direction="forward")
        a left_val  right_val
    0   1        a        1.0
    1   5        b        6.0
    2  10        c        NaN

    >>> pd.merge_asof(left, right, on="a", direction="nearest")
        a left_val  right_val
    0   1        a          1
    1   5        b          6
    2  10        c          7

    We can use indexed DataFrames as well.

    >>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10])
    >>> left
       left_val
    1         a
    5         b
    10        c

    >>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
    >>> right
       right_val
    1          1
    2          2
    3          3
    6          6
    7          7

    >>> pd.merge_asof(left, right, left_index=True, right_index=True)
       left_val  right_val
    1         a          1
    5         b          3
    10        c          7

    Here is a real-world times-series example

    >>> quotes = pd.DataFrame(
    ...     {
    ...         "time": [
    ...             pd.Timestamp("2016-05-25 13:30:00.023"),
    ...             pd.Timestamp("2016-05-25 13:30:00.023"),
    ...             pd.Timestamp("2016-05-25 13:30:00.030"),
    ...             pd.Timestamp("2016-05-25 13:30:00.041"),
    ...             pd.Timestamp("2016-05-25 13:30:00.048"),
    ...             pd.Timestamp("2016-05-25 13:30:00.049"),
    ...             pd.Timestamp("2016-05-25 13:30:00.072"),
    ...             pd.Timestamp("2016-05-25 13:30:00.075"),
    ...         ],
    ...         "ticker": [
    ...             "GOOG",
    ...             "MSFT",
    ...             "MSFT",
    ...             "MSFT",
    ...             "GOOG",
    ...             "AAPL",
    ...             "GOOG",
    ...             "MSFT",
    ...         ],
    ...         "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
    ...         "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
    ...     }
    ... )
    >>> quotes
                         time ticker     bid     ask
    0 2016-05-25 13:30:00.023   GOOG  720.50  720.93
    1 2016-05-25 13:30:00.023   MSFT   51.95   51.96
    2 2016-05-25 13:30:00.030   MSFT   51.97   51.98
    3 2016-05-25 13:30:00.041   MSFT   51.99   52.00
    4 2016-05-25 13:30:00.048   GOOG  720.50  720.93
    5 2016-05-25 13:30:00.049   AAPL   97.99   98.01
    6 2016-05-25 13:30:00.072   GOOG  720.50  720.88
    7 2016-05-25 13:30:00.075   MSFT   52.01   52.03

    >>> trades = pd.DataFrame(
    ...     {
    ...         "time": [
    ...             pd.Timestamp("2016-05-25 13:30:00.023"),
    ...             pd.Timestamp("2016-05-25 13:30:00.038"),
    ...             pd.Timestamp("2016-05-25 13:30:00.048"),
    ...             pd.Timestamp("2016-05-25 13:30:00.048"),
    ...             pd.Timestamp("2016-05-25 13:30:00.048"),
    ...         ],
    ...         "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
    ...         "price": [51.95, 51.95, 720.77, 720.92, 98.0],
    ...         "quantity": [75, 155, 100, 100, 100],
    ...     }
    ... )
    >>> trades
                         time ticker   price  quantity
    0 2016-05-25 13:30:00.023   MSFT   51.95        75
    1 2016-05-25 13:30:00.038   MSFT   51.95       155
    2 2016-05-25 13:30:00.048   GOOG  720.77       100
    3 2016-05-25 13:30:00.048   GOOG  720.92       100
    4 2016-05-25 13:30:00.048   AAPL   98.00       100

    By default we are taking the asof of the quotes

    >>> pd.merge_asof(trades, quotes, on="time", by="ticker")
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155   51.97   51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 2ms between the quote time and the trade time

    >>> pd.merge_asof(
    ...     trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
    ... )
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155     NaN     NaN
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 10ms between the quote time and the trade time
    and we exclude exact matches on time. However *prior* data will
    propagate forward

    >>> pd.merge_asof(
    ...     trades,
    ...     quotes,
    ...     on="time",
    ...     by="ticker",
    ...     tolerance=pd.Timedelta("10ms"),
    ...     allow_exact_matches=False,
    ... )
                         time ticker   price  quantity    bid    ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75    NaN    NaN
    1 2016-05-25 13:30:00.038   MSFT   51.95       155  51.97  51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100    NaN    NaN
    3 2016-05-25 13:30:00.048   GOOG  720.92       100    NaN    NaN
    4 2016-05-25 13:30:00.048   AAPL   98.00       100    NaN    NaN
    """
    op = _AsOfMerge(
        left,
        right,
        on=on,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        by=by,
        left_by=left_by,
        right_by=right_by,
        suffixes=suffixes,
        how="asof",
        tolerance=tolerance,
        allow_exact_matches=allow_exact_matches,
        direction=direction,
    )
    return op.get_result()
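
# Illustrative sketch (not part of pandas): an integer ``tolerance`` with
# merge_asof, mirroring the docstring examples above. With the default
# backward search and tolerance=1, left keys 5 and 10 find no right key
# within distance 1, so they get NaN.
def _demo_merge_asof_tolerance_sketch() -> None:
    import pandas as pd

    left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
    right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
    out = pd.merge_asof(left, right, on="a", tolerance=1)
    assert out["right_val"].isna().tolist() == [False, True, True]
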
# TODO: transformations??
class _MergeOperation:
    """
    Perform a database (SQL) merge operation between two DataFrame or Series
    objects using either columns as keys or their row indexes
    """

    _merge_type = "merge"
    how: JoinHow | Literal["asof"]
    on: IndexLabel | None
    # left_on/right_on may be None when passed, but in validate_specification
    #  get replaced with non-None.
    left_on: Sequence[Hashable | AnyArrayLike]
    right_on: Sequence[Hashable | AnyArrayLike]
    left_index: bool
    right_index: bool
    sort: bool
    suffixes: Suffixes
    indicator: str | bool
    validate: str | None
    join_names: list[Hashable]
    right_join_keys: list[ArrayLike]
    left_join_keys: list[ArrayLike]

    def __init__(
        self,
        left: DataFrame | Series,
        right: DataFrame | Series,
        how: JoinHow | Literal["asof"] = "inner",
        on: IndexLabel | AnyArrayLike | None = None,
        left_on: IndexLabel | AnyArrayLike | None = None,
        right_on: IndexLabel | AnyArrayLike | None = None,
        left_index: bool = False,
        right_index: bool = False,
        sort: bool = True,
        suffixes: Suffixes = ("_x", "_y"),
        indicator: str | bool = False,
        validate: str | None = None,
    ) -> None:
        _left = _validate_operand(left)
        _right = _validate_operand(right)
        self.left = self.orig_left = _left
        self.right = self.orig_right = _right
        self.how = how

        self.on = com.maybe_make_list(on)

        self.suffixes = suffixes
        self.sort = sort or how == "outer"

        self.left_index = left_index
        self.right_index = right_index

        self.indicator = indicator

        if not is_bool(left_index):
            raise ValueError(
                f"left_index parameter must be of type bool, not {type(left_index)}"
            )
        if not is_bool(right_index):
            raise ValueError(
                f"right_index parameter must be of type bool, not {type(right_index)}"
            )

        # GH 40993: raise when merging between different levels; enforced in 2.0
        if _left.columns.nlevels != _right.columns.nlevels:
            msg = (
                "Not allowed to merge between different levels. "
                f"({_left.columns.nlevels} levels on the left, "
                f"{_right.columns.nlevels} on the right)"
            )
            raise MergeError(msg)

        # GH 59435: raise when "how" is not a valid Merge type
        merge_type = {"left", "right", "inner", "outer", "cross", "asof"}
        if how not in merge_type:
            raise ValueError(
                f"'{how}' is not a valid Merge type: "
                f"left, right, inner, outer, cross, asof"
            )

        self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on)

        (
            self.left_join_keys,
            self.right_join_keys,
            self.join_names,
            left_drop,
            right_drop,
        ) = self._get_merge_keys()

        if left_drop:
            self.left = self.left._drop_labels_or_levels(left_drop)

        if right_drop:
            self.right = self.right._drop_labels_or_levels(right_drop)

        self._maybe_require_matching_dtypes(self.left_join_keys, self.right_join_keys)
        self._validate_tolerance(self.left_join_keys)

        # validate the merge keys dtypes. We may need to coerce
        # to avoid incompatible dtypes
        self._maybe_coerce_merge_keys()

        # If argument passed to validate,
        # check if columns specified as unique
        # are in fact unique.
        if validate is not None:
            self._validate_validate_kwd(validate)

    def _maybe_require_matching_dtypes(
        self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike]
    ) -> None:
        # Overridden by AsOfMerge
        pass

    def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None:
        # Overridden by AsOfMerge
        pass

    @final
    def _reindex_and_concat(
        self,
        join_index: Index,
        left_indexer: npt.NDArray[np.intp] | None,
        right_indexer: npt.NDArray[np.intp] | None,
    ) -> DataFrame:
        """
        reindex along index and concat along columns.
        """
        # Take views so we do not alter the originals
        left = self.left[:]
        right = self.right[:]

        llabels, rlabels = _items_overlap_with_suffix(
            self.left._info_axis, self.right._info_axis, self.suffixes
        )

        if left_indexer is not None and not is_range_indexer(left_indexer, len(left)):
            # Pinning the index here (and in the right code just below) is not
            #  necessary, but makes the `.take` more performant if we have e.g.
            #  a MultiIndex for left.index.
            lmgr = left._mgr.reindex_indexer(
                join_index,
                left_indexer,
                axis=1,
                only_slice=True,
                allow_dups=True,
                use_na_proxy=True,
            )
            left = left._constructor_from_mgr(lmgr, axes=lmgr.axes)
            left.index = join_index

        if right_indexer is not None and not is_range_indexer(
            right_indexer, len(right)
        ):
            rmgr = right._mgr.reindex_indexer(
                join_index,
                right_indexer,
                axis=1,
                only_slice=True,
                allow_dups=True,
                use_na_proxy=True,
            )
            right = right._constructor_from_mgr(rmgr, axes=rmgr.axes)
            right.index = join_index

        from pandas import concat

        left.columns = llabels
        right.columns = rlabels
        result = concat([left, right], axis=1)
        return result

    def get_result(self) -> DataFrame:
        if self.indicator:
            self.left, self.right = self._indicator_pre_merge(self.left, self.right)

        join_index, left_indexer, right_indexer = self._get_join_info()

        result = self._reindex_and_concat(join_index, left_indexer, right_indexer)
        result = result.__finalize__(self, method=self._merge_type)

        if self.indicator:
            result = self._indicator_post_merge(result)

        self._maybe_add_join_keys(result, left_indexer, right_indexer)

        self._maybe_restore_index_levels(result)

        return result.__finalize__(self, method="merge")

    @final
    @cache_readonly
    def _indicator_name(self) -> str | None:
        if isinstance(self.indicator, str):
            return self.indicator
        elif isinstance(self.indicator, bool):
            return "_merge" if self.indicator else None
        else:
            raise ValueError(
                "indicator option can only accept boolean or string arguments"
            )

    @final
    def _indicator_pre_merge(
        self, left: DataFrame, right: DataFrame
    ) -> tuple[DataFrame, DataFrame]:
        columns = left.columns.union(right.columns)

        for i in ["_left_indicator", "_right_indicator"]:
            if i in columns:
                raise ValueError(
                    "Cannot use `indicator=True` option when "
                    f"data contains a column named {i}"
                )
        if self._indicator_name in columns:
            raise ValueError(
                "Cannot use name of an existing column for indicator column"
            )

        left = left.copy()
        right = right.copy()

        left["_left_indicator"] = 1
        left["_left_indicator"] = left["_left_indicator"].astype("int8")

        right["_right_indicator"] = 2
        right["_right_indicator"] = right["_right_indicator"].astype("int8")

        return left, right

    @final
    def _indicator_post_merge(self, result: DataFrame) -> DataFrame:
        result["_left_indicator"] = result["_left_indicator"].fillna(0)
        result["_right_indicator"] = result["_right_indicator"].fillna(0)

        result[self._indicator_name] = Categorical(
            (result["_left_indicator"] + result["_right_indicator"]),
            categories=[1, 2, 3],
        )
        result[self._indicator_name] = result[
            self._indicator_name
        ].cat.rename_categories(["left_only", "right_only", "both"])

        result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1)
        return result
    @final
    def _maybe_restore_index_levels(self, result: DataFrame) -> None:
        """
        Restore index levels specified as `on` parameters

        Here we check for cases where `self.left_on` and `self.right_on` pairs
        each reference an index level in their respective DataFrames. The
        joined columns corresponding to these pairs are then restored to the
        index of `result`.

        **Note:** This method has side effects. It modifies `result` in-place

        Parameters
        ----------
        result: DataFrame
            merge result

        Returns
        -------
        None
        """
        names_to_restore = []
        for name, left_key, right_key in zip(
            self.join_names, self.left_on, self.right_on
        ):
            if (
                # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible
                # type "Union[Hashable, ExtensionArray, Index, Series]"; expected
                # "Hashable"
                self.orig_left._is_level_reference(left_key)  # type: ignore[arg-type]
                # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible
                # type "Union[Hashable, ExtensionArray, Index, Series]"; expected
                # "Hashable"
                and self.orig_right._is_level_reference(
                    right_key  # type: ignore[arg-type]
                )
                and left_key == right_key
                and name not in result.index.names
            ):
                names_to_restore.append(name)

        if names_to_restore:
            result.set_index(names_to_restore, inplace=True)

    @final
    def _maybe_add_join_keys(
        self,
        result: DataFrame,
        left_indexer: npt.NDArray[np.intp] | None,
        right_indexer: npt.NDArray[np.intp] | None,
    ) -> None:
        left_has_missing = None
        right_has_missing = None

        assert all(isinstance(x, _known) for x in self.left_join_keys)

        keys = zip(self.join_names, self.left_on, self.right_on)
        for i, (name, lname, rname) in enumerate(keys):
            if not _should_fill(lname, rname):
                continue

            take_left, take_right = None, None

            if name in result:
                if left_indexer is not None or right_indexer is not None:
                    if name in self.left:
                        if left_has_missing is None:
                            left_has_missing = (
                                False
                                if left_indexer is None
                                else (left_indexer == -1).any()
                            )

                        if left_has_missing:
                            take_right = self.right_join_keys[i]

                            if result[name].dtype != self.left[name].dtype:
                                take_left = self.left[name]._values

                    elif name in self.right:
                        if right_has_missing is None:
                            right_has_missing = (
                                False
                                if right_indexer is None
                                else (right_indexer == -1).any()
                            )

                        if right_has_missing:
                            take_left = self.left_join_keys[i]

                            if result[name].dtype != self.right[name].dtype:
                                take_right = self.right[name]._values

            else:
                take_left = self.left_join_keys[i]
                take_right = self.right_join_keys[i]

            if take_left is not None or take_right is not None:
                if take_left is None:
                    lvals = result[name]._values
                elif left_indexer is None:
                    lvals = take_left
                else:
                    # TODO: can we pin down take_left's type earlier?
                    take_left = extract_array(take_left, extract_numpy=True)
                    lfill = na_value_for_dtype(take_left.dtype)
                    lvals = algos.take_nd(take_left, left_indexer, fill_value=lfill)

                if take_right is None:
                    rvals = result[name]._values
                elif right_indexer is None:
                    rvals = take_right
                else:
                    # TODO: can we pin down take_right's type earlier?
                    taker = extract_array(take_right, extract_numpy=True)
                    rfill = na_value_for_dtype(taker.dtype)
                    rvals = algos.take_nd(taker, right_indexer, fill_value=rfill)

                # if we have an all missing left_indexer
                # make sure to just use the right values or vice-versa
                if left_indexer is not None and (left_indexer == -1).all():
                    key_col = Index(rvals)
                    result_dtype = rvals.dtype
                elif right_indexer is not None and (right_indexer == -1).all():
                    key_col = Index(lvals)
                    result_dtype = lvals.dtype
                else:
                    key_col = Index(lvals)
                    if left_indexer is not None:
                        mask_left = left_indexer == -1
                        key_col = key_col.where(~mask_left, rvals)
                    result_dtype = find_common_type([lvals.dtype, rvals.dtype])
                    if (
                        lvals.dtype.kind == "M"
                        and rvals.dtype.kind == "M"
                        and result_dtype.kind == "O"
                    ):
                        # TODO(non-nano) Workaround for common_type not dealing
                        #  with different resolutions
                        result_dtype = key_col.dtype

                if result._is_label_reference(name):
                    result[name] = result._constructor_sliced(
                        key_col, dtype=result_dtype, index=result.index
                    )
                elif result._is_level_reference(name):
                    if isinstance(result.index, MultiIndex):
                        key_col.name = name
                        idx_list = [
                            result.index.get_level_values(level_name)
                            if level_name != name
                            else key_col
                            for level_name in result.index.names
                        ]

                        result.set_index(idx_list, inplace=True)
                    else:
                        result.index = Index(key_col, name=name)
                else:
                    result.insert(i, name or f"key_{i}", key_col)

    def _get_join_indexers(
        self,
    ) -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        """return the join indexers"""
        # make mypy happy
        assert self.how != "asof"
        return get_join_indexers(
            self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how
        )

    @final
    def _get_join_info(
        self,
    ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        left_ax = self.left.index
        right_ax = self.right.index

        if self.left_index and self.right_index and self.how != "asof":
            join_index, left_indexer, right_indexer = left_ax.join(
                right_ax, how=self.how, return_indexers=True, sort=self.sort
            )

        elif self.right_index and self.how == "left":
            join_index, left_indexer, right_indexer = _left_join_on_index(
                left_ax, right_ax, self.left_join_keys, sort=self.sort
            )

        elif self.left_index and self.how == "right":
            join_index, right_indexer, left_indexer = _left_join_on_index(
                right_ax, left_ax, self.right_join_keys, sort=self.sort
            )

        else:
            (left_indexer, right_indexer) = self._get_join_indexers()

            if self.right_index:
                if len(self.left) > 0:
                    join_index = self._create_join_index(
                        left_ax,
                        right_ax,
                        left_indexer,
                        how="right",
                    )
                elif right_indexer is None:
                    join_index = right_ax.copy()
                else:
                    join_index = right_ax.take(right_indexer)
            elif self.left_index:
                if self.how == "asof":
                    # GH#33463 asof should always behave like a left merge
                    join_index = self._create_join_index(
                        left_ax,
                        right_ax,
                        left_indexer,
                        how="left",
                    )

                elif len(self.right) > 0:
                    join_index = self._create_join_index(
                        right_ax,
                        left_ax,
                        right_indexer,
                        how="left",
                    )
                elif left_indexer is None:
                    join_index = left_ax.copy()
                else:
                    join_index = left_ax.take(left_indexer)
            else:
                n = len(left_ax) if left_indexer is None else len(left_indexer)
                join_index = default_index(n)

        return join_index, left_indexer, right_indexer
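
    # Illustrative sketch (not part of pandas): the index handling performed
    # by _get_join_info above. Joining on an index passes that index through
    # to the result, while a plain column-on-column merge falls through to
    # the ``default_index(n)`` branch and gets a fresh RangeIndex.
    @staticmethod
    def _demo_join_index_sketch() -> None:
        import pandas as pd

        left = pd.DataFrame({"k": ["a", "b"]}, index=[10, 11])
        right = pd.DataFrame({"v": [1]}, index=["a"])
        out = pd.merge(left, right, left_on="k", right_index=True, how="left")
        assert list(out.index) == [10, 11]  # left index is passed on
        out2 = pd.merge(left.reset_index(), left.reset_index(), on="k")
        assert list(out2.index) == [0, 1]  # fresh RangeIndex otherwise
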
    @final
    def _create_join_index(
        self,
        index: Index,
        other_index: Index,
        indexer: npt.NDArray[np.intp] | None,
        how: JoinHow = "left",
    ) -> Index:
        """
        Create a join index by rearranging one index to match another

        Parameters
        ----------
        index : Index
            index being rearranged
        other_index : Index
            used to supply values not found in index
        indexer : np.ndarray[np.intp] or None
            how to rearrange index
        how : str
            Replacement is only necessary if indexer based on other_index.

        Returns
        -------
        Index
        """
        if self.how in (how, "outer") and not isinstance(other_index, MultiIndex):
            # if final index requires values in other_index but not target
            # index, indexer may hold missing (-1) values, causing Index.take
            # to take the final value in target index. So, we set the last
            # element to be the desired fill value. We do not use allow_fill
            # and fill_value because it throws a ValueError on integer indices
            mask = indexer == -1
            if np.any(mask):
                fill_value = na_value_for_dtype(index.dtype, compat=False)
                index = index.append(Index([fill_value]))
        if indexer is None:
            return index.copy()
        return index.take(indexer)

    @final
    def _get_merge_keys(
        self,
    ) -> tuple[
        list[ArrayLike],
        list[ArrayLike],
        list[Hashable],
        list[Hashable],
        list[Hashable],
    ]:
        """
        Returns
        -------
        left_keys, right_keys, join_names, left_drop, right_drop
        """
        left_keys: list[ArrayLike] = []
        right_keys: list[ArrayLike] = []
        join_names: list[Hashable] = []
        right_drop: list[Hashable] = []
        left_drop: list[Hashable] = []

        left, right = self.left, self.right

        is_lkey = lambda x: isinstance(x, _known) and len(x) == len(left)
        is_rkey = lambda x: isinstance(x, _known) and len(x) == len(right)

        # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
        # user could, for example, request 'left_index' and 'left_by'. In a
        # regular pd.merge(), users cannot specify both 'left_index' and
        # 'left_on'. (Instead, users have a MultiIndex). That means the
        # self.left_on in this function is always empty in a pd.merge(), but
        # a pd.merge_asof(left_index=True, left_by=...) will result in a
        # self.left_on array with a None in the middle of it. This requires
        # a work-around as designated in the code below.
        # See _validate_left_right_on() for where this happens.

        # ugh, spaghetti re #733
        if _any(self.left_on) and _any(self.right_on):
            for lk, rk in zip(self.left_on, self.right_on):
                lk = extract_array(lk, extract_numpy=True)
                rk = extract_array(rk, extract_numpy=True)
                if is_lkey(lk):
                    lk = cast(ArrayLike, lk)
                    left_keys.append(lk)
                    if is_rkey(rk):
                        rk = cast(ArrayLike, rk)
                        right_keys.append(rk)
                        join_names.append(None)  # what to do?
                    else:
                        # Then we're either Hashable or a wrong-length arraylike,
                        #  the latter of which will raise
                        rk = cast(Hashable, rk)
                        if rk is not None:
                            right_keys.append(right._get_label_or_level_values(rk))
                            join_names.append(rk)
                        else:
                            # work-around for merge_asof(right_index=True)
                            right_keys.append(right.index._values)
                            join_names.append(right.index.name)
                else:
                    if not is_rkey(rk):
                        # Then we're either Hashable or a wrong-length arraylike,
                        #  the latter of which will raise
                        rk = cast(Hashable, rk)
                        if rk is not None:
                            right_keys.append(right._get_label_or_level_values(rk))
                        else:
                            # work-around for merge_asof(right_index=True)
                            right_keys.append(right.index._values)
                        if lk is not None and lk == rk:  # FIXME: what about other NAs?
                            right_drop.append(rk)
                    else:
                        rk = cast(ArrayLike, rk)
                        right_keys.append(rk)
                    if lk is not None:
                        # Then we're either Hashable or a wrong-length arraylike,
                        #  the latter of which will raise
                        lk = cast(Hashable, lk)
                        left_keys.append(left._get_label_or_level_values(lk))
                        join_names.append(lk)
                    else:
                        # work-around for merge_asof(left_index=True)
                        left_keys.append(left.index._values)
                        join_names.append(left.index.name)
        elif _any(self.left_on):
            for k in self.left_on:
                if is_lkey(k):
                    k = extract_array(k, extract_numpy=True)
                    k = cast(ArrayLike, k)
                    left_keys.append(k)
                    join_names.append(None)
                else:
                    # Then we're either Hashable or a wrong-length arraylike,
                    #  the latter of which will raise
                    k = cast(Hashable, k)
                    left_keys.append(left._get_label_or_level_values(k))
                    join_names.append(k)
            if isinstance(self.right.index, MultiIndex):
                right_keys = [
                    lev._values.take(lev_codes)
                    for lev, lev_codes in zip(
                        self.right.index.levels, self.right.index.codes
                    )
                ]
            else:
                right_keys = [self.right.index._values]
        elif _any(self.right_on):
            for k in self.right_on:
                k = extract_array(k, extract_numpy=True)
                if is_rkey(k):
                    k = cast(ArrayLike, k)
                    right_keys.append(k)
                    join_names.append(None)
                else:
                    # Then we're either Hashable or a wrong-length arraylike,
                    #  the latter of which will raise
                    k = cast(Hashable, k)
                    right_keys.append(right._get_label_or_level_values(k))
                    join_names.append(k)
            if isinstance(self.left.index, MultiIndex):
                left_keys = [
                    lev._values.take(lev_codes)
                    for lev, lev_codes in zip(
                        self.left.index.levels, self.left.index.codes
                    )
                ]
            else:
                left_keys = [self.left.index._values]

        return left_keys, right_keys, join_names, left_drop, right_drop
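
    # Illustrative sketch (not part of pandas): the int/float check performed
    # by _maybe_coerce_merge_keys below. Merging an int64 key with a float64
    # key is allowed, but a UserWarning is emitted when casting the floats to
    # int would lose information (here, 2.5).
    @staticmethod
    def _demo_int_float_warning_sketch() -> None:
        import pandas as pd

        left = pd.DataFrame({"k": [1, 2]})  # int64 keys
        right = pd.DataFrame({"k": [1.0, 2.5]})  # 2.5 != int(2.5)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            pd.merge(left, right, on="k")
        assert any("int and float" in str(x.message) for x in w)
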
    @final
    def _maybe_coerce_merge_keys(self) -> None:
        # we have valid merges but we may have to further
        # coerce these if they are originally incompatible types
        #
        # for example if these are categorical, but are not dtype_equal
        # or if we have object and integer dtypes

        for lk, rk, name in zip(
            self.left_join_keys, self.right_join_keys, self.join_names
        ):
            if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
                continue

            lk = extract_array(lk, extract_numpy=True)
            rk = extract_array(rk, extract_numpy=True)

            lk_is_cat = isinstance(lk.dtype, CategoricalDtype)
            rk_is_cat = isinstance(rk.dtype, CategoricalDtype)
            lk_is_object_or_string = is_object_dtype(lk.dtype) or is_string_dtype(
                lk.dtype
            )
            rk_is_object_or_string = is_object_dtype(rk.dtype) or is_string_dtype(
                rk.dtype
            )

            # if either left or right is a categorical
            # then they must match exactly in categories & ordered
            if lk_is_cat and rk_is_cat:
                lk = cast(Categorical, lk)
                rk = cast(Categorical, rk)
                if lk._categories_match_up_to_permutation(rk):
                    continue

            elif lk_is_cat or rk_is_cat:
                pass

            elif lk.dtype == rk.dtype:
                continue

            msg = (
                f"You are trying to merge on {lk.dtype} and {rk.dtype} columns "
                f"for key '{name}'. If you wish to proceed you should use pd.concat"
            )

            # if we are numeric, then allow differing
            # kinds to proceed, eg. int64 and int8, int and float
            # further if we are object, but we infer to
            # the same, then proceed
            if is_numeric_dtype(lk.dtype) and is_numeric_dtype(rk.dtype):
                if lk.dtype.kind == rk.dtype.kind:
                    continue

                if isinstance(lk.dtype, ExtensionDtype) and not isinstance(
                    rk.dtype, ExtensionDtype
                ):
                    ct = find_common_type([lk.dtype, rk.dtype])
                    if isinstance(ct, ExtensionDtype):
                        com_cls = ct.construct_array_type()
                        rk = com_cls._from_sequence(rk, dtype=ct, copy=False)
                    else:
                        rk = rk.astype(ct)
                elif isinstance(rk.dtype, ExtensionDtype):
                    ct = find_common_type([lk.dtype, rk.dtype])
                    if isinstance(ct, ExtensionDtype):
                        com_cls = ct.construct_array_type()
                        lk = com_cls._from_sequence(lk, dtype=ct, copy=False)
                    else:
                        lk = lk.astype(ct)

                # check whether ints and floats
                if is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype):
                    # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
                    with np.errstate(invalid="ignore"):
                        # error: Argument 1 to "astype" of "ndarray" has incompatible
                        # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected
                        # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"
                        casted = lk.astype(rk.dtype)  # type: ignore[arg-type]

                    mask = ~np.isnan(lk)
                    match = lk == casted
                    if not match[mask].all():
                        warnings.warn(
                            "You are merging on int and float "
                            "columns where the float values "
                            "are not equal to their int representation.",
                            UserWarning,
                            stacklevel=find_stack_level(),
                        )
                    continue

                if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype):
                    # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
                    with np.errstate(invalid="ignore"):
                        # error: Argument 1 to "astype" of "ndarray" has incompatible
                        # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected
                        # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"
                        casted = rk.astype(lk.dtype)  # type: ignore[arg-type]

                    mask = ~np.isnan(rk)
                    match = rk == casted
                    if not match[mask].all():
                        warnings.warn(
                            "You are merging on int and float "
                            "columns where the float values "
                            "are not equal to their int representation.",
                            UserWarning,
                            stacklevel=find_stack_level(),
                        )
                    continue

                # let's infer and see if we are ok
                if lib.infer_dtype(lk, skipna=False) == lib.infer_dtype(
                    rk, skipna=False
                ):
                    continue

            # Check if we are trying to merge on obviously
            # incompatible dtypes GH 9780, GH 15800

            # bool values are coerced to object
            elif (lk_is_object_or_string and is_bool_dtype(rk.dtype)) or (
                is_bool_dtype(lk.dtype) and rk_is_object_or_string
            ):
                pass

            # object values are allowed to be merged
            elif (lk_is_object_or_string and is_numeric_dtype(rk.dtype)) or (
                is_numeric_dtype(lk.dtype) and rk_is_object_or_string
            ):
                inferred_left = lib.infer_dtype(lk, skipna=False)
                inferred_right = lib.infer_dtype(rk, skipna=False)
                bool_types = ["integer", "mixed-integer", "boolean", "empty"]
                string_types = ["string", "unicode", "mixed", "bytes", "empty"]

                # inferred bool
                if inferred_left in bool_types and inferred_right in bool_types:
                    pass

                # unless we are merging non-string-like with string-like
                elif (
                    inferred_left in string_types and inferred_right not in string_types
                ) or (
                    inferred_right in string_types and inferred_left not in string_types
                ):
                    raise ValueError(msg)

            # datetimelikes must match exactly
            elif needs_i8_conversion(lk.dtype) and not needs_i8_conversion(rk.dtype):
                raise ValueError(msg)
            elif not needs_i8_conversion(lk.dtype) and needs_i8_conversion(rk.dtype):
                raise ValueError(msg)
            elif isinstance(lk.dtype, DatetimeTZDtype) and not isinstance(
                rk.dtype, DatetimeTZDtype
            ):
                raise ValueError(msg)
            elif not isinstance(lk.dtype, DatetimeTZDtype) and isinstance(
                rk.dtype, DatetimeTZDtype
            ):
                raise ValueError(msg)
            elif (
                isinstance(lk.dtype, DatetimeTZDtype)
                and isinstance(rk.dtype, DatetimeTZDtype)
            ) or (lk.dtype.kind == "M" and rk.dtype.kind == "M"):
                # allows datetime with different resolutions
                continue

            # datetime and timedelta not allowed
            elif lk.dtype.kind == "M" and rk.dtype.kind == "m":
                raise ValueError(msg)
            elif lk.dtype.kind == "m" and rk.dtype.kind == "M":
                raise ValueError(msg)

            elif is_object_dtype(lk.dtype) and is_object_dtype(rk.dtype):
                continue

            # Houston, we have a problem!
            # let's coerce to object if the dtypes aren't
            # categorical, otherwise coerce to the category
            # dtype. If we coerced categories to object,
            # then we would lose type information on some
            # columns, and end up trying to merge
            # incompatible dtypes. See GH 16900.
            if name in self.left.columns:
                typ = cast(Categorical, lk).categories.dtype if lk_is_cat else object
                self.left = self.left.copy()
                self.left[name] = self.left[name].astype(typ)
            if name in self.right.columns:
                typ = cast(Categorical, rk).categories.dtype if rk_is_cat else object
                self.right = self.right.copy()
                self.right[name] = self.right[name].astype(typ)

    def _validate_left_right_on(self, left_on, right_on):
        left_on = com.maybe_make_list(left_on)
        right_on = com.maybe_make_list(right_on)

        # Hm, any way to make this logic less complicated??
        if self.on is None and left_on is None and right_on is None:
            if self.left_index and self.right_index:
                left_on, right_on = (), ()
            elif self.left_index:
                raise MergeError("Must pass right_on or right_index=True")
            elif self.right_index:
                raise MergeError("Must pass left_on or left_index=True")
            else:
                # use the common columns
                left_cols = self.left.columns
                right_cols = self.right.columns

                common_cols = left_cols.intersection(right_cols)
                if len(common_cols) == 0:
                    raise MergeError(
                        "No common columns to perform merge on. "
                        f"Merge options: left_on={left_on}, "
                        f"right_on={right_on}, "
                        f"left_index={self.left_index}, "
                        f"right_index={self.right_index}"
                    )
                if (
                    not left_cols.join(common_cols, how="inner").is_unique
                    or not right_cols.join(common_cols, how="inner").is_unique
                ):
                    raise MergeError(f"Data columns not unique: {common_cols!r}")
                left_on = right_on = common_cols
        elif self.on is not None:
            if left_on is not None or right_on is not None:
                raise MergeError(
                    'Can only pass argument "on" OR "left_on" '
                    'and "right_on", not a combination of both.'
                )
            if self.left_index or self.right_index:
                raise MergeError(
                    'Can only pass argument "on" OR "left_index" '
                    'and "right_index", not a combination of both.'
                )
            left_on = right_on = self.on
        elif left_on is not None:
            if self.left_index:
                raise MergeError(
                    'Can only pass argument "left_on" OR "left_index" not both.'
                )
            if not self.right_index and right_on is None:
                raise MergeError('Must pass "right_on" OR "right_index".')
            n = len(left_on)
            if self.right_index:
                if len(left_on) != self.right.index.nlevels:
                    raise ValueError(
                        "len(left_on) must equal the number "
                        'of levels in the index of "right"'
                    )
                right_on = [None] * n
        elif right_on is not None:
            if self.right_index:
                raise MergeError(
                    'Can only pass argument "right_on" OR "right_index" not both.'
                )
            if not self.left_index and left_on is None:
                raise MergeError('Must pass "left_on" OR "left_index".')
            n = len(right_on)
            if self.left_index:
                if len(right_on) != self.left.index.nlevels:
                    raise ValueError(
                        "len(right_on) must equal the number "
                        'of levels in the index of "left"'
                    )
                left_on = [None] * n
        if len(right_on) != len(left_on):
            raise ValueError("len(right_on) must equal len(left_on)")

        return left_on, right_on

    @final
    def _validate_validate_kwd(self, validate: str) -> None:
        # Check uniqueness of each
        if self.left_index:
            left_unique = self.orig_left.index.is_unique
        else:
            left_unique = MultiIndex.from_arrays(self.left_join_keys).is_unique

        if self.right_index:
            right_unique = self.orig_right.index.is_unique
        else:
            right_unique = MultiIndex.from_arrays(self.right_join_keys).is_unique

        # Check data integrity
        if validate in ["one_to_one", "1:1"]:
            if not left_unique and not right_unique:
                raise MergeError(
                    "Merge keys are not unique in either left "
                    "or right dataset; not a one-to-one merge"
                )
            if not left_unique:
                raise MergeError(
                    "Merge keys are not unique in left dataset; not a one-to-one merge"
                )
            if not right_unique:
                raise MergeError(
                    "Merge keys are not unique in right dataset; not a one-to-one merge"
                )

        elif validate in ["one_to_many", "1:m"]:
            if not left_unique:
                raise MergeError(
                    "Merge keys are not unique in left dataset; not a one-to-many merge"
                )

        elif validate in ["many_to_one", "m:1"]:
            if not right_unique:
                raise MergeError(
                    "Merge keys are not unique in right dataset; "
                    "not a many-to-one merge"
                )

        elif validate in ["many_to_many", "m:m"]:
            pass

        else:
            raise ValueError(
                f'"{validate}" is not a valid argument. '
                "Valid arguments are:\n"
                '- "1:1"\n'
                '- "1:m"\n'
                '- "m:1"\n'
                '- "m:m"\n'
                '- "one_to_one"\n'
                '- "one_to_many"\n'
                '- "many_to_one"\n'
                '- "many_to_many"'
            )
"""assertlen(left_keys)==len(right_keys),"left_keys and right_keys must be the same length"# fast-path for empty left/rightleft_n=len(left_keys[0])right_n=len(right_keys[0])ifleft_n==0:ifhowin["left","inner"]:return_get_empty_indexer()elifnotsortandhowin["right","outer"]:return_get_no_sort_one_missing_indexer(right_n,True)elifright_n==0:ifhowin["right","inner"]:return_get_empty_indexer()elifnotsortandhowin["left","outer"]:return_get_no_sort_one_missing_indexer(left_n,False)lkey:ArrayLikerkey:ArrayLikeiflen(left_keys)>1:# get left & right join labels and num. of levels at each locationmapped=(_factorize_keys(left_keys[n],right_keys[n],sort=sort)forninrange(len(left_keys)))zipped=zip(*mapped)llab,rlab,shape=(list(x)forxinzipped)# get flat i8 keys from label listslkey,rkey=_get_join_keys(llab,rlab,tuple(shape),sort)else:lkey=left_keys[0]rkey=right_keys[0]left=Index(lkey)right=Index(rkey)if(left.is_monotonic_increasingandright.is_monotonic_increasingand(left.is_uniqueorright.is_unique)):_,lidx,ridx=left.join(right,how=how,return_indexers=True,sort=sort)else:lidx,ridx=get_join_indexers_non_unique(left._values,right._values,sort,how)iflidxisnotNoneandis_range_indexer(lidx,len(left)):lidx=NoneifridxisnotNoneandis_range_indexer(ridx,len(right)):ridx=Nonereturnlidx,ridxdefget_join_indexers_non_unique(left:ArrayLike,right:ArrayLike,sort:bool=False,how:JoinHow="inner",)->tuple[npt.NDArray[np.intp],npt.NDArray[np.intp]]:""" Get join indexers for left and right. Parameters ---------- left : ArrayLike right : ArrayLike sort : bool, default False how : {'inner', 'outer', 'left', 'right'}, default 'inner' Returns ------- np.ndarray[np.intp] Indexer into left. np.ndarray[np.intp] Indexer into right. """lkey,rkey,count=_factorize_keys(left,right,sort=sort,how=how)ifcount==-1:# hash joinreturnlkey,rkeyifhow=="left":lidx,ridx=libjoin.left_outer_join(lkey,rkey,count,sort=sort)elifhow=="right":ridx,lidx=libjoin.left_outer_join(rkey,lkey,count,sort=sort)elifhow=="inner":lidx,ridx=libjoin.inner_join(lkey,rkey,count,sort=sort)elifhow=="outer":lidx,ridx=libjoin.full_outer_join(lkey,rkey,count)returnlidx,ridxdefrestore_dropped_levels_multijoin(left:MultiIndex,right:MultiIndex,dropped_level_names,join_index:Index,lindexer:npt.NDArray[np.intp],rindexer:npt.NDArray[np.intp],)->tuple[FrozenList,FrozenList,FrozenList]:""" *this is an internal non-public method* Returns the levels, labels and names of a multi-index to multi-index join. Depending on the type of join, this method restores the appropriate dropped levels of the joined multi-index. 
def restore_dropped_levels_multijoin(
    left: MultiIndex,
    right: MultiIndex,
    dropped_level_names,
    join_index: Index,
    lindexer: npt.NDArray[np.intp],
    rindexer: npt.NDArray[np.intp],
) -> tuple[FrozenList, FrozenList, FrozenList]:
    """
    *this is an internal non-public method*

    Returns the levels, labels and names of a multi-index to multi-index join.
    Depending on the type of join, this method restores the appropriate
    dropped levels of the joined multi-index.
    The method relies on lindexer, rindexer which hold the index positions of
    left and right, where a join was feasible

    Parameters
    ----------
    left : MultiIndex
        left index
    right : MultiIndex
        right index
    dropped_level_names : str array
        list of non-common level names
    join_index : Index
        the index of the join between the
        common levels of left and right
    lindexer : np.ndarray[np.intp]
        left indexer
    rindexer : np.ndarray[np.intp]
        right indexer

    Returns
    -------
    levels : list of Index
        levels of combined multiindexes
    labels : np.ndarray[np.intp]
        labels of combined multiindexes
    names : List[Hashable]
        names of combined multiindex levels
    """

    def _convert_to_multiindex(index: Index) -> MultiIndex:
        if isinstance(index, MultiIndex):
            return index
        else:
            return MultiIndex.from_arrays([index._values], names=[index.name])

    # For multi-multi joins with one overlapping level,
    # the returned index is of type Index
    # Assure that join_index is of type MultiIndex
    # so that dropped levels can be appended
    join_index = _convert_to_multiindex(join_index)

    join_levels = join_index.levels
    join_codes = join_index.codes
    join_names = join_index.names

    # Iterate through the levels that must be restored
    for dropped_level_name in dropped_level_names:
        if dropped_level_name in left.names:
            idx = left
            indexer = lindexer
        else:
            idx = right
            indexer = rindexer

        # The index of the level name to be restored
        name_idx = idx.names.index(dropped_level_name)

        restore_levels = idx.levels[name_idx]
        # Inject -1 in the codes list where a join was not possible
        # IOW indexer[i]=-1
        codes = idx.codes[name_idx]
        if indexer is None:
            restore_codes = codes
        else:
            restore_codes = algos.take_nd(codes, indexer, fill_value=-1)

        # error: Cannot determine type of "__add__"
        join_levels = join_levels + [restore_levels]  # type: ignore[has-type]
        join_codes = join_codes + [restore_codes]  # type: ignore[has-type]
        join_names = join_names + [dropped_level_name]

    return join_levels, join_codes, join_names


class _OrderedMerge(_MergeOperation):
    _merge_type = "ordered_merge"

    def __init__(
        self,
        left: DataFrame | Series,
        right: DataFrame | Series,
        on: IndexLabel | None = None,
        left_on: IndexLabel | None = None,
        right_on: IndexLabel | None = None,
        left_index: bool = False,
        right_index: bool = False,
        suffixes: Suffixes = ("_x", "_y"),
        fill_method: str | None = None,
        how: JoinHow | Literal["asof"] = "outer",
    ) -> None:
        self.fill_method = fill_method
        _MergeOperation.__init__(
            self,
            left,
            right,
            on=on,
            left_on=left_on,
            left_index=left_index,
            right_index=right_index,
            right_on=right_on,
            how=how,
            suffixes=suffixes,
            sort=True,  # factorize sorts
        )

    def get_result(self) -> DataFrame:
        join_index, left_indexer, right_indexer = self._get_join_info()

        left_join_indexer: npt.NDArray[np.intp] | None
        right_join_indexer: npt.NDArray[np.intp] | None

        if self.fill_method == "ffill":
            if left_indexer is None:
                left_join_indexer = None
            else:
                left_join_indexer = libjoin.ffill_indexer(left_indexer)
            if right_indexer is None:
                right_join_indexer = None
            else:
                right_join_indexer = libjoin.ffill_indexer(right_indexer)
        elif self.fill_method is None:
            left_join_indexer = left_indexer
            right_join_indexer = right_indexer
        else:
            raise ValueError("fill_method must be 'ffill' or None")

        result = self._reindex_and_concat(
            join_index, left_join_indexer, right_join_indexer
        )
        self._maybe_add_join_keys(result, left_indexer, right_indexer)
        return result


def _asof_by_function(direction: str):
    name = f"asof_join_{direction}_on_X_by_Y"
    return getattr(libjoin, name, None)
length")left_on=self.left_by+list(left_on)right_on=self.right_by+list(right_on)returnleft_on,right_ondef_maybe_require_matching_dtypes(self,left_join_keys:list[ArrayLike],right_join_keys:list[ArrayLike])->None:# TODO: why do we do this for AsOfMerge but not the others?def_check_dtype_match(left:ArrayLike,right:ArrayLike,i:int)->None:ifleft.dtype!=right.dtype:ifisinstance(left.dtype,CategoricalDtype)andisinstance(right.dtype,CategoricalDtype):# The generic error message is confusing for categoricals.## In this function, the join keys include both the original# ones of the merge_asof() call, and also the keys passed# to its by= argument. Unordered but equal categories# are not supported for the former, but will fail# later with a ValueError, so we don't *need* to check# for them here.msg=(f"incompatible merge keys [{i}] {left.dtype!r} and "f"{right.dtype!r}, both sides category, but not equal ones")else:msg=(f"incompatible merge keys [{i}] {left.dtype!r} and "f"{right.dtype!r}, must be the same type")raiseMergeError(msg)# validate index types are the samefori,(lk,rk)inenumerate(zip(left_join_keys,right_join_keys)):_check_dtype_match(lk,rk,i)ifself.left_index:lt=self.left.index._valueselse:lt=left_join_keys[-1]ifself.right_index:rt=self.right.index._valueselse:rt=right_join_keys[-1]_check_dtype_match(lt,rt,0)def_validate_tolerance(self,left_join_keys:list[ArrayLike])->None:# validate tolerance; datetime.timedelta or Timedelta if we have a DTIifself.toleranceisnotNone:ifself.left_index:lt=self.left.index._valueselse:lt=left_join_keys[-1]msg=(f"incompatible tolerance {self.tolerance}, must be compat "f"with type {lt.dtype!r}")ifneeds_i8_conversion(lt.dtype)or(isinstance(lt,ArrowExtensionArray)andlt.dtype.kindin"mM"):ifnotisinstance(self.tolerance,datetime.timedelta):raiseMergeError(msg)ifself.tolerance<Timedelta(0):raiseMergeError("tolerance must be positive")elifis_integer_dtype(lt.dtype):ifnotis_integer(self.tolerance):raiseMergeError(msg)ifself.tolerance<0:raiseMergeError("tolerance must be positive")elifis_float_dtype(lt.dtype):ifnotis_number(self.tolerance):raiseMergeError(msg)# error: Unsupported operand types for > ("int" and "Number")ifself.tolerance<0:# type: ignore[operator]raiseMergeError("tolerance must be positive")else:raiseMergeError("key must be integer, timestamp or float")def_convert_values_for_libjoin(self,values:AnyArrayLike,side:str)->np.ndarray:# we require sortedness and non-null values in the join keysifnotIndex(values).is_monotonic_increasing:ifisna(values).any():raiseValueError(f"Merge keys contain null values on {side} side")raiseValueError(f"{side} keys must be sorted")ifisinstance(values,ArrowExtensionArray):values=values._maybe_convert_datelike_array()ifneeds_i8_conversion(values.dtype):values=values.view("i8")elifisinstance(values,BaseMaskedArray):# we've verified above that no nulls existvalues=values._dataelifisinstance(values,ExtensionArray):values=values.to_numpy()# error: Incompatible return value type (got "Union[ExtensionArray,# Any, ndarray[Any, Any], ndarray[Any, dtype[Any]], Index, Series]",# expected "ndarray[Any, Any]")returnvalues# type: ignore[return-value]def_get_join_indexers(self)->tuple[npt.NDArray[np.intp],npt.NDArray[np.intp]]:"""return the join indexers"""# values to compareleft_values=(self.left.index._valuesifself.left_indexelseself.left_join_keys[-1])right_values=(self.right.index._valuesifself.right_indexelseself.right_join_keys[-1])# _maybe_require_matching_dtypes already checked for dtype 
        tolerance = self.tolerance
        if tolerance is not None:
            # TODO: can we reuse a tolerance-conversion function from
            #  e.g. TimedeltaIndex?
            if needs_i8_conversion(left_values.dtype) or (
                isinstance(left_values, ArrowExtensionArray)
                and left_values.dtype.kind in "mM"
            ):
                tolerance = Timedelta(tolerance)
                # TODO: we have no test cases with PeriodDtype here; probably
                #  need to adjust tolerance for that case.
                if left_values.dtype.kind in "mM":
                    # Make sure the i8 representation for tolerance
                    # matches that for left_values/right_values.
                    if isinstance(left_values, ArrowExtensionArray):
                        unit = left_values.dtype.pyarrow_dtype.unit
                    else:
                        unit = ensure_wrapped_if_datetimelike(left_values).unit
                    tolerance = tolerance.as_unit(unit)

                tolerance = tolerance._value

        # initial type conversion as needed
        left_values = self._convert_values_for_libjoin(left_values, "left")
        right_values = self._convert_values_for_libjoin(right_values, "right")

        # a "by" parameter requires special handling
        if self.left_by is not None:
            # remove 'on' parameter from values if one existed
            if self.left_index and self.right_index:
                left_join_keys = self.left_join_keys
                right_join_keys = self.right_join_keys
            else:
                left_join_keys = self.left_join_keys[0:-1]
                right_join_keys = self.right_join_keys[0:-1]

            mapped = [
                _factorize_keys(
                    left_join_keys[n],
                    right_join_keys[n],
                    sort=False,
                )
                for n in range(len(left_join_keys))
            ]
            if len(left_join_keys) == 1:
                left_by_values = mapped[0][0]
                right_by_values = mapped[0][1]
            else:
                arrs = [np.concatenate(m[:2]) for m in mapped]
                shape = tuple(m[2] for m in mapped)
                group_index = get_group_index(
                    arrs, shape=shape, sort=False, xnull=False
                )
                left_len = len(left_join_keys[0])
                left_by_values = group_index[:left_len]
                right_by_values = group_index[left_len:]

            left_by_values = ensure_int64(left_by_values)
            right_by_values = ensure_int64(right_by_values)

            # choose appropriate function by type
            func = _asof_by_function(self.direction)
            return func(
                left_values,
                right_values,
                left_by_values,
                right_by_values,
                self.allow_exact_matches,
                tolerance,
            )
        else:
            # choose appropriate function by type
            func = _asof_by_function(self.direction)
            return func(
                left_values,
                right_values,
                None,
                None,
                self.allow_exact_matches,
                tolerance,
                False,
            )


def _get_multiindex_indexer(
    join_keys: list[ArrayLike], index: MultiIndex, sort: bool
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    # left & right join labels and num. of levels at each location
    mapped = (
        _factorize_keys(index.levels[n]._values, join_keys[n], sort=sort)
        for n in range(index.nlevels)
    )
    zipped = zip(*mapped)
    rcodes, lcodes, shape = (list(x) for x in zipped)
    if sort:
        rcodes = list(map(np.take, rcodes, index.codes))
    else:
        i8copy = lambda a: a.astype("i8", subok=False)
        rcodes = list(map(i8copy, index.codes))

    # fix right labels if there were any nulls
    for i, join_key in enumerate(join_keys):
        mask = index.codes[i] == -1
        if mask.any():
            # check if there were already any nulls at this location;
            # if there were, they are factorized to `shape[i] - 1`
            a = join_key[lcodes[i] == shape[i] - 1]
            if a.size == 0 or not a[0] != a[0]:
                shape[i] += 1

            rcodes[i][mask] = shape[i] - 1

    # get flat i8 join keys
    lkey, rkey = _get_join_keys(lcodes, rcodes, tuple(shape), sort)
    return lkey, rkey


def _get_empty_indexer() -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    """Return empty join indexers."""
    return (
        np.array([], dtype=np.intp),
        np.array([], dtype=np.intp),
    )
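

# Illustrative example for the helper below (not in the original source):
# _get_no_sort_one_missing_indexer(3, left_missing=True) returns
# (array([-1, -1, -1]), array([0, 1, 2])), i.e. every left position is
# "missing" while the right side is selected in its original order.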
def _get_no_sort_one_missing_indexer(
    n: int, left_missing: bool
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    """
    Return join indexers where all of one side is selected without sorting
    and none of the other side is selected.

    Parameters
    ----------
    n : int
        Length of indexers to create.
    left_missing : bool
        If True, the left indexer will contain only -1's.
        If False, the right indexer will contain only -1's.

    Returns
    -------
    np.ndarray[np.intp]
        Left indexer
    np.ndarray[np.intp]
        Right indexer
    """
    idx = np.arange(n, dtype=np.intp)
    idx_missing = np.full(shape=n, fill_value=-1, dtype=np.intp)
    if left_missing:
        return idx_missing, idx
    return idx, idx_missing


def _left_join_on_index(
    left_ax: Index, right_ax: Index, join_keys: list[ArrayLike], sort: bool = False
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp]]:
    if isinstance(right_ax, MultiIndex):
        lkey, rkey = _get_multiindex_indexer(join_keys, right_ax, sort=sort)
    else:
        # error: Incompatible types in assignment (expression has type
        # "Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series]",
        # variable has type "ndarray[Any, dtype[signedinteger[Any]]]")
        lkey = join_keys[0]  # type: ignore[assignment]
        # error: Incompatible types in assignment (expression has type "Index",
        # variable has type "ndarray[Any, dtype[signedinteger[Any]]]")
        rkey = right_ax._values  # type: ignore[assignment]

    left_key, right_key, count = _factorize_keys(lkey, rkey, sort=sort)
    left_indexer, right_indexer = libjoin.left_outer_join(
        left_key, right_key, count, sort=sort
    )

    if sort or len(left_ax) != len(left_indexer):
        # if asked to sort or there are 1-to-many matches
        join_index = left_ax.take(left_indexer)
        return join_index, left_indexer, right_indexer

    # left frame preserves order & length of its index
    return left_ax, None, right_indexer
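

# Note on _left_join_on_index above (illustrative, not in the original
# source): when sort=False and every left row matches at most one right row,
# it returns (left_ax, None, right_indexer); the None left indexer signals
# that the left frame's order and length are preserved, so no take is needed.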


def _factorize_keys(
    lk: ArrayLike,
    rk: ArrayLike,
    sort: bool = True,
    how: str | None = None,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]:
    """
    Encode left and right keys as enumerated types.

    This is used to get the join indexers to be used when merging DataFrames.

    Parameters
    ----------
    lk : ndarray, ExtensionArray
        Left key.
    rk : ndarray, ExtensionArray
        Right key.
    sort : bool, defaults to True
        If True, the encoding is done such that the unique elements in the
        keys are sorted.
    how : str, optional
        Used to determine if we can use hash-join. If not given, then just
        factorize keys.

    Returns
    -------
    np.ndarray[np.intp]
        Left (resp. right if called with `key='right'`) labels, as enumerated type.
    np.ndarray[np.intp]
        Right (resp. left if called with `key='right'`) labels, as enumerated type.
    int
        Number of unique elements in union of left and right labels.
        -1 if we used a hash-join.

    See Also
    --------
    merge : Merge DataFrame or named Series objects with a database-style join.
    algorithms.factorize : Encode the object as an enumerated type or
        categorical variable.

    Examples
    --------
    >>> lk = np.array(["a", "c", "b"])
    >>> rk = np.array(["a", "c"])

    Here, the unique values are `'a', 'b', 'c'`. With the default
    `sort=True`, the encoding will be `{0: 'a', 1: 'b', 2: 'c'}`:

    >>> pd.core.reshape.merge._factorize_keys(lk, rk)
    (array([0, 2, 1]), array([0, 2]), 3)

    With `sort=False`, the encoding will correspond to the order in which
    the unique elements first appear: `{0: 'a', 1: 'c', 2: 'b'}`:

    >>> pd.core.reshape.merge._factorize_keys(lk, rk, sort=False)
    (array([0, 1, 2]), array([0, 1]), 3)
    """
    # TODO: if either is a RangeIndex, we can likely factorize more efficiently?

    if (
        isinstance(lk.dtype, DatetimeTZDtype) and isinstance(rk.dtype, DatetimeTZDtype)
    ) or (lib.is_np_dtype(lk.dtype, "M") and lib.is_np_dtype(rk.dtype, "M")):
        # Extract the ndarray (UTC-localized) values
        # Note: we don't need the dtypes to match, as these can still be compared
        lk, rk = cast("DatetimeArray", lk)._ensure_matching_resos(rk)
        lk = cast("DatetimeArray", lk)._ndarray
        rk = cast("DatetimeArray", rk)._ndarray

    elif (
        isinstance(lk.dtype, CategoricalDtype)
        and isinstance(rk.dtype, CategoricalDtype)
        and lk.dtype == rk.dtype
    ):
        assert isinstance(lk, Categorical)
        assert isinstance(rk, Categorical)
        # Cast rk to encoding so we can compare codes with lk
        rk = lk._encode_with_my_categories(rk)

        lk = ensure_int64(lk.codes)
        rk = ensure_int64(rk.codes)

    elif isinstance(lk, ExtensionArray) and lk.dtype == rk.dtype:
        if (isinstance(lk.dtype, ArrowDtype) and is_string_dtype(lk.dtype)) or (
            isinstance(lk.dtype, StringDtype) and lk.dtype.storage == "pyarrow"
        ):
            import pyarrow as pa
            import pyarrow.compute as pc

            len_lk = len(lk)
            lk = lk._pa_array  # type: ignore[attr-defined]
            rk = rk._pa_array  # type: ignore[union-attr]
            dc = (
                pa.chunked_array(lk.chunks + rk.chunks)  # type: ignore[union-attr]
                .combine_chunks()
                .dictionary_encode()
            )

            llab, rlab, count = (
                pc.fill_null(dc.indices[slice(len_lk)], -1)
                .to_numpy()
                .astype(np.intp, copy=False),
                pc.fill_null(dc.indices[slice(len_lk, None)], -1)
                .to_numpy()
                .astype(np.intp, copy=False),
                len(dc.dictionary),
            )

            if sort:
                uniques = dc.dictionary.to_numpy(zero_copy_only=False)
                llab, rlab = _sort_labels(uniques, llab, rlab)

            if dc.null_count > 0:
                lmask = llab == -1
                lany = lmask.any()
                rmask = rlab == -1
                rany = rmask.any()
                if lany:
                    np.putmask(llab, lmask, count)
                if rany:
                    np.putmask(rlab, rmask, count)
                count += 1
            return llab, rlab, count

        if not isinstance(lk, BaseMaskedArray) and not (
            # exclude arrow dtypes that would get cast to object
            isinstance(lk.dtype, ArrowDtype)
            and (
                is_numeric_dtype(lk.dtype.numpy_dtype)
                or is_string_dtype(lk.dtype)
                and not sort
            )
        ):
            lk, _ = lk._values_for_factorize()

            # error: Item "ndarray" of "Union[Any, ndarray]" has no attribute
            # "_values_for_factorize"
            rk, _ = rk._values_for_factorize()  # type: ignore[union-attr]

    if needs_i8_conversion(lk.dtype) and lk.dtype == rk.dtype:
        # GH#23917 TODO: Needs tests for non-matching dtypes
        # GH#23917 TODO: needs tests for case where lk is integer-dtype
        #  and rk is datetime-dtype
        lk = np.asarray(lk, dtype=np.int64)
        rk = np.asarray(rk, dtype=np.int64)

    klass, lk, rk = _convert_arrays_and_get_rizer_klass(lk, rk)

    rizer = klass(
        max(len(lk), len(rk)),
        uses_mask=isinstance(rk, (BaseMaskedArray, ArrowExtensionArray)),
    )

    if isinstance(lk, BaseMaskedArray):
        assert isinstance(rk, BaseMaskedArray)
        lk_data, lk_mask = lk._data, lk._mask
        rk_data, rk_mask = rk._data, rk._mask

    elif isinstance(lk, ArrowExtensionArray):
        assert isinstance(rk, ArrowExtensionArray)
        # we can only get here with numeric dtypes
        # TODO: Remove when we have a Factorizer for Arrow
        lk_data = lk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype)
        rk_data = rk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype)
        lk_mask, rk_mask = lk.isna(), rk.isna()

    else:
        # Argument 1 to "factorize" of "ObjectFactorizer" has incompatible type
        # "Union[ndarray[Any, dtype[signedinteger[_64Bit]]],
        # ndarray[Any, dtype[object_]]]"; expected "ndarray[Any, dtype[object_]]"
        lk_data, rk_data = lk, rk  # type: ignore[assignment]
        lk_mask, rk_mask = None, None
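
    # Note (illustrative, not part of the original source): the hash-join
    # fast path below applies only to an unsorted inner join on integer,
    # unsigned, float or bool keys; it is taken when the right keys are
    # already unique (get_count() == len(rlab)), and the sentinel count of
    # -1 tells the caller that indexers, not factorized labels, were returned.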
dtype[object_]]"lk_data,rk_data=lk,rk# type: ignore[assignment]lk_mask,rk_mask=None,Nonehash_join_available=how=="inner"andnotsortandlk.dtype.kindin"iufb"ifhash_join_available:rlab=rizer.factorize(rk_data,mask=rk_mask)ifrizer.get_count()==len(rlab):ridx,lidx=rizer.hash_inner_join(lk_data,lk_mask)returnlidx,ridx,-1else:llab=rizer.factorize(lk_data,mask=lk_mask)else:llab=rizer.factorize(lk_data,mask=lk_mask)rlab=rizer.factorize(rk_data,mask=rk_mask)assertllab.dtype==np.dtype(np.intp),llab.dtypeassertrlab.dtype==np.dtype(np.intp),rlab.dtypecount=rizer.get_count()ifsort:uniques=rizer.uniques.to_array()llab,rlab=_sort_labels(uniques,llab,rlab)# NA grouplmask=llab==-1lany=lmask.any()rmask=rlab==-1rany=rmask.any()iflanyorrany:iflany:np.putmask(llab,lmask,count)ifrany:np.putmask(rlab,rmask,count)count+=1returnllab,rlab,countdef_convert_arrays_and_get_rizer_klass(lk:ArrayLike,rk:ArrayLike)->tuple[type[libhashtable.Factorizer],ArrayLike,ArrayLike]:klass:type[libhashtable.Factorizer]ifis_numeric_dtype(lk.dtype):iflk.dtype!=rk.dtype:dtype=find_common_type([lk.dtype,rk.dtype])ifisinstance(dtype,ExtensionDtype):cls=dtype.construct_array_type()ifnotisinstance(lk,ExtensionArray):lk=cls._from_sequence(lk,dtype=dtype,copy=False)else:lk=lk.astype(dtype,copy=False)ifnotisinstance(rk,ExtensionArray):rk=cls._from_sequence(rk,dtype=dtype,copy=False)else:rk=rk.astype(dtype,copy=False)else:lk=lk.astype(dtype,copy=False)rk=rk.astype(dtype,copy=False)ifisinstance(lk,BaseMaskedArray):# Invalid index type "type" for "Dict[Type[object], Type[Factorizer]]";# expected type "Type[object]"klass=_factorizers[lk.dtype.type]# type: ignore[index]elifisinstance(lk.dtype,ArrowDtype):klass=_factorizers[lk.dtype.numpy_dtype.type]else:klass=_factorizers[lk.dtype.type]else:klass=libhashtable.ObjectFactorizerlk=ensure_object(lk)rk=ensure_object(rk)returnklass,lk,rkdef_sort_labels(uniques:np.ndarray,left:npt.NDArray[np.intp],right:npt.NDArray[np.intp])->tuple[npt.NDArray[np.intp],npt.NDArray[np.intp]]:llength=len(left)labels=np.concatenate([left,right])_,new_labels=algos.safe_sort(uniques,labels,use_na_sentinel=True)new_left,new_right=new_labels[:llength],new_labels[llength:]returnnew_left,new_rightdef_get_join_keys(llab:list[npt.NDArray[np.int64|np.intp]],rlab:list[npt.NDArray[np.int64|np.intp]],shape:Shape,sort:bool,)->tuple[npt.NDArray[np.int64],npt.NDArray[np.int64]]:# how many levels can be done without overflownlev=next(levforlevinrange(len(shape),0,-1)ifnotis_int64_overflow_possible(shape[:lev]))# get keys for the first `nlev` levelsstride=np.prod(shape[1:nlev],dtype="i8")lkey=stride*llab[0].astype("i8",subok=False,copy=False)rkey=stride*rlab[0].astype("i8",subok=False,copy=False)foriinrange(1,nlev):withnp.errstate(divide="ignore"):stride//=shape[i]lkey+=llab[i]*striderkey+=rlab[i]*strideifnlev==len(shape):# all done!returnlkey,rkey# densify current keys to avoid overflowlkey,rkey,count=_factorize_keys(lkey,rkey,sort=sort)llab=[lkey]+llab[nlev:]rlab=[rkey]+rlab[nlev:]shape=(count,)+shape[nlev:]return_get_join_keys(llab,rlab,shape,sort)def_should_fill(lname,rname)->bool:ifnotisinstance(lname,str)ornotisinstance(rname,str):returnTruereturnlname==rnamedef_any(x)->bool:returnxisnotNoneandcom.any_not_none(*x)def_validate_operand(obj:DataFrame|Series)->DataFrame:ifisinstance(obj,ABCDataFrame):returnobjelifisinstance(obj,ABCSeries):ifobj.nameisNone:raiseValueError("Cannot merge a Series without a name")returnobj.to_frame()else:raiseTypeError(f"Can only merge Series or DataFrame objects, a {type(obj)} was 
passed")def_items_overlap_with_suffix(left:Index,right:Index,suffixes:Suffixes)->tuple[Index,Index]:""" Suffixes type validation. If two indices overlap, add suffixes to overlapping entries. If corresponding suffix is empty, the entry is simply converted to string. """ifnotis_list_like(suffixes,allow_sets=False)orisinstance(suffixes,dict):raiseTypeError(f"Passing 'suffixes' as a {type(suffixes)}, is not supported. ""Provide 'suffixes' as a tuple instead.")to_rename=left.intersection(right)iflen(to_rename)==0:returnleft,rightlsuffix,rsuffix=suffixesifnotlsuffixandnotrsuffix:raiseValueError(f"columns overlap but no suffix specified: {to_rename}")defrenamer(x,suffix:str|None):""" Rename the left and right indices. If there is overlap, and suffix is not None, add suffix, otherwise, leave it as-is. Parameters ---------- x : original column name suffix : str or None Returns ------- x : renamed column name """ifxinto_renameandsuffixisnotNone:returnf"{x}{suffix}"returnxlrenamer=partial(renamer,suffix=lsuffix)rrenamer=partial(renamer,suffix=rsuffix)llabels=left._transform_index(lrenamer)rlabels=right._transform_index(rrenamer)dups=[]ifnotllabels.is_unique:# Only warn when duplicates are caused because of suffixes, already duplicated# columns in origin should not warndups=llabels[(llabels.duplicated())&(~left.duplicated())].tolist()ifnotrlabels.is_unique:dups.extend(rlabels[(rlabels.duplicated())&(~right.duplicated())].tolist())ifdups:raiseMergeError(f"Passing 'suffixes' which cause duplicate columns {set(dups)} is "f"not allowed.",)returnllabels,rlabels