```python
from sklearn.datasets import load_iris
from xgboost.sklearn import XGBClassifier
from xgboost import plot_importance
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the sample dataset
iris = load_iris()
x, y = iris.data, iris.target

# Split the dataset
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=123457)

xgb_clf = XGBClassifier(
    booster='gbtree',
    objective='multi:softmax',
    num_class=3,
    gamma=0.1,
    max_depth=6,
    reg_lambda=2,
    subsample=0.7,
    colsample_bytree=0.7,
    min_child_weight=3,
    eta=0.1,
    seed=1000,
    nthread=4,
    eval_metric='auc',  # xgboost >= 2.0 removed eval_metric from fit(); pass it here instead
)

# Train the model
xgb_clf.fit(x_train, y_train)

import m2cgen as m2c

xgb_clf.base_score = 0
code = m2c.export_to_c(xgb_clf)
with open('model.c', 'w') as f:
    f.write(code)
```
Full trace:
```
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[24], line 3
      1 import m2cgen as m2c
      2 xgb_clf.base_score = 0
----> 3 code = m2c.export_to_c(xgb_clf)
      4 with open ('model.c', 'w') as f:
      5     f.write(code)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\m2cgen\exporters.py:81, in export_to_c(model, indent, function_name)
     61 """
     62 Generates a C code representation of the given model.
     63 (...)
     75 code : string
     76 """
     77 interpreter = interpreters.CInterpreter(
     78     indent=indent,
     79     function_name=function_name
     80 )
---> 81 return _export(model, interpreter)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\m2cgen\exporters.py:459, in _export(model, interpreter)
    457 def _export(model, interpreter):
    458     assembler_cls = get_assembler_cls(model)
--> 459     model_ast = assembler_cls(model).assemble()
    460     return interpreter.interpret(model_ast)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\m2cgen\assemblers\boosting.py:214, in XGBoostModelAssemblerSelector.assemble(self)
    213 def assemble(self):
--> 214     return self.assembler.assemble()

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\m2cgen\assemblers\boosting.py:36, in BaseBoostingAssembler.assemble(self)
     34         return self._assemble_bin_class_output(self._all_estimator_params)
     35     else:
---> 36         return self._assemble_multi_class_output(self._all_estimator_params)
     37 else:
     38     result_ast = self._assemble_single_output(self._all_estimator_params, base_score=self._base_score)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\m2cgen\assemblers\boosting.py:62, in BaseBoostingAssembler._assemble_multi_class_output(self, estimator_params)
     58 def _assemble_multi_class_output(self, estimator_params):
     59     # Multi-class output is calculated based on discussion in
     60     # https://github.com/dmlc/xgboost/issues/1746#issuecomment-295962863
     61     # and the enhancement to support boosted forests in XGBoost.
---> 62     splits = _split_estimator_params_by_classes(
     63         estimator_params, self._output_size,
     64         self.multiclass_params_seq_len)
     66     base_score = self._base_score
     67     exprs = [
     68         self._assemble_single_output(e, base_score=base_score, split_idx=i)
     69         for i, e in enumerate(splits)
     70     ]

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\m2cgen\assemblers\boosting.py:347, in _split_estimator_params_by_classes(values, n_classes, params_seq_len)
    342 def _split_estimator_params_by_classes(values, n_classes, params_seq_len):
    343     # Splits are computed based on a comment
    344     # https://github.com/dmlc/xgboost/issues/1746#issuecomment-267400592
    345     # and the enhancement to support boosted forests in XGBoost.
    346     values_len = len(values)
--> 347     block_len = n_classes * params_seq_len
    348     indices = list(range(values_len))
    349     indices_by_class = np.array(
    350         [[indices[i:i + params_seq_len]
    351           for i in range(j, values_len, block_len)]
    352          for j in range(0, block_len, params_seq_len)]
    353     ).reshape(n_classes, -1)

TypeError: unsupported operand type(s) for *: 'int' and 'NoneType'
```
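The failing line is `block_len = n_classes * params_seq_len`, where `params_seq_len` is m2cgen's `multiclass_params_seq_len` and arrives as `None`. A quick check on the model above, assuming (as the traceback suggests) that this value is taken from the estimator's `num_parallel_tree`, which xgboost 2.x reports as `None` unless set explicitly:

```python
# xgboost 2.x returns None from get_params() for any constructor
# parameter that was never set explicitly; num_parallel_tree is one
# of them, and multiplying n_classes by None raises the TypeError.
print(xgb_clf.get_params().get("num_parallel_tree"))  # -> None here
```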
xgboost version '2.0.3'
Set `num_parallel_tree` explicitly when constructing the `XGBClassifier`, e.g. `XGBClassifier(booster="gbtree", tree_method="hist", num_parallel_tree=1)`.
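A minimal end-to-end sketch of that workaround against the reproduction above; the only substantive change is passing `num_parallel_tree=1` (plus `tree_method="hist"`, as in the suggestion) so m2cgen gets a concrete per-class tree count:

```python
import m2cgen as m2c
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from xgboost.sklearn import XGBClassifier

iris = load_iris()
x_train, x_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=123457)

# Setting num_parallel_tree explicitly keeps it from coming back as
# None in xgboost 2.x, which is what the failing multiplication hit.
xgb_clf = XGBClassifier(
    booster='gbtree',
    tree_method='hist',
    objective='multi:softmax',
    num_class=3,
    num_parallel_tree=1,
)
xgb_clf.fit(x_train, y_train)

xgb_clf.base_score = 0
code = m2c.export_to_c(xgb_clf)
with open('model.c', 'w') as f:
    f.write(code)
```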