
ML — LiR & 2PolyR & 4PolyR: fitting (train) and price prediction (test) on a pizza dataset with linear regression (LiR), quadratic polynomial regression (2PolyR), and quartic polynomial regression (4PolyR)

Output


Design approach


Core code

from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

# Map the training inputs to degree-4 polynomial features
poly4 = PolynomialFeatures(degree=4)
X_train_poly4 = poly4.fit_transform(X_train)

# Fit an ordinary linear regression on the expanded features
r_poly4 = LinearRegression()
r_poly4.fit(X_train_poly4, y_train)

# Apply the same mapping to the prediction grid xx and predict prices
xx_poly4 = poly4.transform(xx)
yy_poly4 = r_poly4.predict(xx_poly4)
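The snippet above covers only the quartic model. Below is a minimal end-to-end sketch of all three models (LiR, 2PolyR, 4PolyR); the pizza diameters/prices and the prediction grid xx are illustrative assumptions (a commonly used toy dataset), so substitute the post's actual training data if it differs.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

# Assumed toy pizza data: diameter (inches) -> price (dollars)
X_train = np.array([[6], [8], [10], [14], [18]])
y_train = np.array([7, 9, 13, 17.5, 18])

# Evenly spaced diameters used for prediction/plotting
xx = np.linspace(0, 26, 100).reshape(-1, 1)

# 1) Plain linear regression (LiR)
lir = LinearRegression()
lir.fit(X_train, y_train)
yy_lir = lir.predict(xx)

# 2) Quadratic polynomial regression (2PolyR)
poly2 = PolynomialFeatures(degree=2)
X_train_poly2 = poly2.fit_transform(X_train)
r_poly2 = LinearRegression()
r_poly2.fit(X_train_poly2, y_train)
yy_poly2 = r_poly2.predict(poly2.transform(xx))

# 3) Quartic polynomial regression (4PolyR)
poly4 = PolynomialFeatures(degree=4)
X_train_poly4 = poly4.fit_transform(X_train)
r_poly4 = LinearRegression()
r_poly4.fit(X_train_poly4, y_train)
yy_poly4 = r_poly4.predict(poly4.transform(xx))

# Training-set R^2 scores for a quick comparison of the three fits
print(lir.score(X_train, y_train))
print(r_poly2.score(X_train_poly2, y_train))
print(r_poly4.score(X_train_poly4, y_train))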

# Imports used by this excerpt of scikit-learn's PolynomialFeatures source
from itertools import chain, combinations
from itertools import combinations_with_replacement as combinations_w_r

import numpy as np

from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, FLOAT_DTYPES


class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.

    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).

    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.,   0.,   1.],
           [  1.,   2.,   3.,   4.,   6.,   9.],
           [  1.,   4.,   5.,  16.,  20.,  25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.],
           [  1.,   2.,   3.,   6.],
           [  1.,   4.,   5.,  20.]])

    Attributes
    ----------
    powers_ : array, shape (n_output_features, n_input_features)
        powers_[i, j] is the exponent of the jth input in the ith output.

    n_input_features_ : int
        The total number of input features.

    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
    """

    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias

    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        # Plain combinations give interaction terms only; combinations with
        # replacement give the full polynomial terms (including powers)
        comb = combinations if interaction_only else combinations_w_r
        start = int(not include_bias)
        return chain.from_iterable(comb(range(n_features), i)
                                   for i in range(start, degree + 1))

    @property
    def powers_(self):
        check_is_fitted(self, 'n_input_features_')
        combinations = self._combinations(self.n_input_features_, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])
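    # Illustration (not part of the original source): for a single input
    # feature with degree=4 and include_bias=True, _combinations yields
    # (), (0,), (0, 0), (0, 0, 0), (0, 0, 0, 0), so powers_ evaluates to
    # [[0], [1], [2], [3], [4]] - the bias column plus x0, x0^2, x0^3, x0^4,
    # which is exactly the expansion used by the 4PolyR pizza model above.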

    def get_feature_names(self, input_features=None):
        """
        Return feature names for output features

        Parameters
        ----------
        input_features : list of string, length n_features, optional
            String names for input features if available. By default,
            "x0", "x1", ... "xn_features" is used.

        Returns
        -------
        output_feature_names : list of string, length n_output_features
        """
        powers = self.powers_
        if input_features is None:
            input_features = ['x%d' % i for i in range(powers.shape[1])]
        feature_names = []
        for row in powers:
            inds = np.where(row)[0]
            if len(inds):
                # e.g. "x0^2 x1" for a row of exponents [2, 1]
                name = " ".join("%s^%d" % (input_features[ind], exp)
                                if exp != 1 else input_features[ind]
                                for (ind, exp) in zip(inds, row[inds]))
            else:
                name = "1"
            feature_names.append(name)
        return feature_names
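    # Illustration (not part of the original source): for the degree-4,
    # single-feature case, get_feature_names() returns
    # ['1', 'x0', 'x0^2', 'x0^3', 'x0^4'].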

    def fit(self, X, y=None):
        """
        Compute number of output features.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data.

        Returns
        -------
        self : instance
        """
        n_samples, n_features = check_array(X).shape
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        self.n_input_features_ = n_features
        self.n_output_features_ = sum(1 for _ in combinations)
        return self

    def transform(self, X):
        """Transform data to polynomial features

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to transform, row by row.

        Returns
        -------
        XP : np.ndarray shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.
        """
        check_is_fitted(self, ['n_input_features_', 'n_output_features_'])

        X = check_array(X, dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape

        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")

        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)

        # fill each output column with the product of the input columns
        # selected by the corresponding index combination
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        for i, c in enumerate(combinations):
            XP[:, i] = X[:, c].prod(1)

        return XP
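To tie the source excerpt back to the pizza regression, the short check below (a sketch, using the same assumed toy diameters as above rather than the post's actual output) shows what the degree-4 expansion produces for a single diameter feature. Note that recent scikit-learn releases rename get_feature_names to get_feature_names_out and n_input_features_ to n_features_in_.

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X_train = np.array([[6], [8], [10], [14], [18]])  # assumed pizza diameters (inches)

poly4 = PolynomialFeatures(degree=4)
X_train_poly4 = poly4.fit_transform(X_train)

print(poly4.powers_.ravel())      # [0 1 2 3 4]
print(poly4.get_feature_names())  # ['1', 'x0', 'x0^2', 'x0^3', 'x0^4']
print(poly4.n_output_features_)   # 5 -> bias + x0 + x0^2 + x0^3 + x0^4
print(X_train_poly4[0])           # diameter 6 expands to 1, 6, 36, 216, 1296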
