
Day 90 - Regression

ksyke 2024. 12. 5. 17:39

Table of Contents

    Preparing the Data
    Linear Regression
    Polynomial Regression
    Polynomial Features
    k-Neighbors Classifier
    Stochastic Gradient Descent
    Decision Tree Classifier

Preparing the Data

    bream_length = [25.4, 26.3, 26.5, 29.0, 29.0, 29.7, 29.7, 30.0, 30.0, 30.7, 31.0, 31.0, 
                    31.5, 32.0, 32.0, 32.0, 33.0, 33.0, 33.5, 33.5, 34.0, 34.0, 34.5, 35.0, 
                    35.0, 35.0, 35.0, 36.0, 36.0, 37.0, 38.5, 38.5, 39.5, 41.0, 41.0]
    bream_weight = [242.0, 290.0, 340.0, 363.0, 430.0, 450.0, 500.0, 390.0, 450.0, 500.0, 475.0, 500.0, 
                    500.0, 340.0, 600.0, 600.0, 700.0, 700.0, 610.0, 650.0, 575.0, 685.0, 620.0, 680.0, 
                    700.0, 725.0, 720.0, 714.0, 850.0, 1000.0, 920.0, 955.0, 925.0, 975.0, 950.0]
    smelt_length = [9.8, 10.5, 10.6, 11.0, 11.2, 11.3, 11.8, 11.8, 12.0, 12.2, 12.4, 13.0, 14.3, 15.0]
    smelt_weight = [6.7, 7.5, 7.0, 9.7, 9.8, 8.7, 10.0, 9.9, 9.8, 12.2, 13.4, 12.2, 19.7, 19.9]
    import numpy as np

    # Combine the bream and smelt measurements into one dataset:
    # length is the single input feature, weight is the regression target.
    fish_length = np.array(bream_length + smelt_length)
    fish_data = fish_length.reshape(-1, 1)    # scikit-learn expects a 2D (n_samples, n_features) array
    print(fish_data)
    fish_target = np.array(bream_weight + smelt_weight)
    print(fish_target)
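
The post moves straight to modelling, so as a quick optional sketch, a scatter plot of length against weight shows the curved relationship before any fitting; the cm/g axis labels are my assumption about the units.

    import matplotlib.pyplot as plt

    # Visualize length vs. weight before fitting anything.
    plt.scatter(fish_length, fish_target)
    plt.xlabel('length (cm)')   # assumed unit
    plt.ylabel('weight (g)')    # assumed unit
    plt.show()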

Linear Regression

    from sklearn.model_selection import train_test_split
    from sklearn.linear_model import LinearRegression

    # Hold out part of the data to evaluate the model on unseen fish.
    X_train, X_test, y_train, y_test = train_test_split(fish_data, fish_target)

    model = LinearRegression()
    model.fit(X_train, y_train)

    print(model.score(X_test, y_test))   # R^2 on the test set
    print(model.predict([[50]]))         # predicted weight of a 50 cm fish
    print('y=wX+b')
    print('w=', model.coef_, ', b=', model.intercept_)

    import matplotlib.pyplot as plt

    plt.scatter(X_train, y_train)                      # training points
    plt.scatter(50, model.predict([[50]]))             # the prediction at 50 cm
    plt.plot([15, 50],
             [15 * model.coef_ + model.intercept_,
              50 * model.coef_ + model.intercept_])    # fitted line y = wx + b
    plt.show()
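
As a small check that ties the plot back to the printed coefficients, the 50 cm prediction can be reproduced by hand from w and b (this assumes the `model` fitted above):

    # model.predict([[50]]) should match coef_ * 50 + intercept_
    manual = model.coef_[0] * 50 + model.intercept_
    print(manual, model.predict([[50]])[0])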

Polynomial Regression

    # Add a squared length column so the fitted line can bend:
    # the model becomes y = w1*x^2 + w2*x + b.
    X_train2 = np.column_stack((X_train ** 2, X_train))
    X_test2 = np.column_stack((X_test ** 2, X_test))

    model2 = LinearRegression()
    model2.fit(X_train2, y_train)

    print(model2.score(X_test2, y_test))   # R^2 with the quadratic term
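
The original code stops at the score. As a sketch that follows the same pattern as the linear case (and assumes `model2`, `X_train`, numpy and matplotlib from above), the quadratic model can be queried for a 50 cm fish and drawn over the training data; note that a new sample must supply both the squared and the raw length.

    import numpy as np
    import matplotlib.pyplot as plt

    # Predict the weight of a 50 cm fish with the quadratic model.
    print(model2.predict([[50 ** 2, 50]]))

    # Draw the fitted curve y = w1*x^2 + w2*x + b over the training points.
    w1, w2 = model2.coef_
    b = model2.intercept_
    xs = np.arange(15, 51)
    plt.scatter(X_train, y_train)
    plt.plot(xs, w1 * xs ** 2 + w2 * xs + b)
    plt.scatter(50, model2.predict([[50 ** 2, 50]]))
    plt.show()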

Polynomial Features
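
The post leaves this section empty. As a hedged sketch, scikit-learn's PolynomialFeatures can build the squared column automatically instead of calling np.column_stack by hand; this reuses X_train/X_test and LinearRegression from above, and `model3` is just an illustrative name.

    from sklearn.preprocessing import PolynomialFeatures
    from sklearn.linear_model import LinearRegression

    # degree=2 adds x^2; include_bias=False drops the constant column,
    # because LinearRegression already fits an intercept.
    poly = PolynomialFeatures(degree=2, include_bias=False)
    X_train_poly = poly.fit_transform(X_train)
    X_test_poly = poly.transform(X_test)

    model3 = LinearRegression()
    model3.fit(X_train_poly, y_train)
    print(model3.score(X_test_poly, y_test))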

k-Neighbors Classifier
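
This section is also only a heading. Since it names a classifier, the sketch below assumes the bream/smelt lists are reused as a two-class problem (1 = bream, 0 = smelt) with length and weight as the two features; this setup, the n_neighbors value, and the sample point are my assumptions, not the author's code.

    from sklearn.neighbors import KNeighborsClassifier
    import numpy as np

    # Assumed setup: classify bream (1) vs. smelt (0) from (length, weight) pairs.
    features = np.column_stack((bream_length + smelt_length,
                                bream_weight + smelt_weight))
    labels = np.array([1] * len(bream_length) + [0] * len(smelt_length))

    kn = KNeighborsClassifier(n_neighbors=5)
    kn.fit(features, labels)
    print(kn.score(features, labels))    # accuracy on the data it was fit on
    print(kn.predict([[30, 600]]))       # a 30 cm, 600 g fish -> expected class 1 (bream)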

Stochastic Gradient Descent
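
Another empty heading. A minimal sketch for the regression setting would be SGDRegressor on the same length-to-weight data; since gradient descent is sensitive to feature scale, the lengths are standardized first. The classification counterpart would be SGDClassifier, and all parameter values here are illustrative assumptions.

    from sklearn.linear_model import SGDRegressor
    from sklearn.preprocessing import StandardScaler

    # Standardize the single length feature before running gradient descent.
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    sgd = SGDRegressor(max_iter=1000, random_state=42)
    sgd.fit(X_train_scaled, y_train)
    print(sgd.score(X_test_scaled, y_test))   # R^2 on the scaled test set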

Decision Tree Classifier
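
The last heading is likewise empty. As a sketch under the same assumed bream/smelt classification setup as the k-NN example above (it reuses those `features` and `labels`), a decision tree can be fit in the same way; max_depth=3 is an arbitrary illustrative choice.

    from sklearn.tree import DecisionTreeClassifier

    # Reuses the assumed bream/smelt features and labels from the k-NN sketch.
    dt = DecisionTreeClassifier(max_depth=3, random_state=42)
    dt.fit(features, labels)
    print(dt.score(features, labels))
    print(dt.predict([[30, 600]]))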