--- /dev/null
+"""
+Convert data read from training-data files into the training-data format.
+
+Training data x is assumed to be: array[total_data_files][day][price_data]
+Training data y:                  array[total_data_files][correct_label]
+
+This module generates data of that shape.
+"""
+
+def ___genarateSingleData(hoge):
+ """
+ Generate one row of training data from a single data object.
+ The data generated by this function becomes one file's worth of data.
+
+ NOTE(review): stub -- always returns the constant tuple (1, 0).
+ NOTE(review): name has a typo ("genarate" -> "generate") and a triple
+ leading underscore; consider renaming before callers depend on it.
+ """
+ return 1,0
+
+def generateData(hoge):
+ """
+ Generate combined data from a list of Data objects.
+
+ NOTE(review): the function body is not visible in this hunk -- it appears
+ truncated at the hunk boundary; confirm against the full patch.
+ """
+
def generateDataset(self,dataList):
    graphdata = [] #type: List[float]
-    labeldata = [] #type: List[float]
+    labeldata = [] #type: List[int]
+    # Type comment changed to List[int]: labels are now stored via int() below.
    data ="" #type: Data
    for data in dataList:
        temgraphdata = self.createGraphData(data.getData());
        #TODO データサイズがマジックナンバーはやばい。
-    if(data.getLable() != '' and len(temgraphdata)==10):
-        labeldata.append(data.getLable())
+    if(data.getLable() != '' and len(temgraphdata)==500):
+        # NOTE(review): magic number 500 presumably matches the model's input
+        # width (L.Linear(500, ...) elsewhere in this patch) -- confirm, and
+        # hoist into a named constant per the TODO above.
+        labeldata.append(int(data.getLable()))
        graphdata.append(temgraphdata)
    return graphdata,labeldata
    for block in strline:
        floatLine.append(float(block))
        #TODO floatLineの変換処理(データ正規化)を入れておく
-        result.append(floatLine)
+        result.extend(floatLine)
+        # NOTE(review): extend() flattens floatLine into result, so result
+        # becomes one flat list of floats rather than a list of rows --
+        # intentional change, matches the 500-wide flat input above.
    return result
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
+from chainer.training import extensions
import math
from DataReader import DataReader
from DatasetGenerator import DatasetGenerator
class MyChain(Chain):
    def __init__(self):
-        super(Chain,self).__init__(
-            l1=L.Linear(50,30),
-            l2=L.Linear(30,9)
+        # Fixed: super() must name this class (MyChain), not the base Chain.
+        # 3-layer MLP: 500 inputs -> 100 -> 100 -> 10 output classes.
+        super(MyChain,self).__init__(
+            l1=L.Linear(500,100),
+            l2=L.Linear(100,100),
+            l3=L.Linear(100,10),
        )
-    def __call__(self,x):
-        h = F.sigmoid(self.l1(x))
-        o = self.l2(h)
-        return o
+    def __call__(self,x,t):
+        # Training-mode call: returns the softmax cross-entropy loss of the
+        # forward pass against target labels t (interface expected by
+        # chainer's Updater when the model itself is the loss function).
+        return F.softmax_cross_entropy(self.fwd(x),t)
+
+    def fwd(self,x):
+        # Forward pass with ReLU activations; returns raw logits (no softmax --
+        # softmax_cross_entropy applies it internally).
+        h1=F.relu(self.l1(x))
+        h2=F.relu(self.l2(h1))
+        return self.l3(h2)
+
class MyClassifer(Chain):
    def __init__(self,predictor):
+        # NOTE(review): no __init__ body is visible here. If the patched file
+        # really has none (next def is at top level), this is a syntax error --
+        # confirm against the full file. Class name also has a typo:
+        # "Classifer" -> "Classifier".
def main():
    # try:
    #モデルを準備
-    model = L.Classifier(MyChain)
+    # Prepare the model (plain MyChain; its __call__ already returns the loss,
+    # so no L.Classifier wrapper is needed).
+    model = MyChain()
    #オプティマイザを準備
-    optimizer = optimizers.Adam
-    optimizer(model)
-
-    #データ用意
-    train_data = []
-    train_label= []
-    #元データ生成
-    reader = DataReader() #type DataReader
-    dgene = DatasetGenerator()#type DataSetGenerator
-
-    dataList = []
-    dataList = reader.createLearningData()
-    train_data,train_label = dgene.generateDataset(dataList)
-    dust,batchsize=math.modf(len(dataList)/2)
-    batchsize = int(batchsize)
+    # Prepare the optimizer.
+    # NOTE(review): 'optimizers' and 'training' are not imported in this hunk --
+    # confirm `from chainer import optimizers, training` exists above.
+    optimizer = optimizers.Adam()
+    optimizer.setup(model)
+
+    # Prepare the data.
+    train,test = dataPreparation()
+
+    # NOTE(review): StandardUpdater expects a dataset *iterator*
+    # (e.g. iterators.SerialIterator(train, batchsize)), not a raw dataset --
+    # confirm before running.
+    updater = training.StandardUpdater(train,optimizer)
+    trainer = training.Trainer(updater,(10,'epoch'))
+    # Fixed: the trainer extension is the class extensions.ProgressBar,
+    # instantiated; `extensions.progress_bar` does not exist in Chainer.
+    trainer.extend(extensions.ProgressBar())
+    trainer.run()
+
+def dataPreparation():
+    """
+    Prepare (train, test) datasets for the Trainer.
+
+    NOTE(review): still a stub. The original returned the single string
+    "hoge"; the call site `train,test = dataPreparation()` would unpack that
+    string into its four characters and raise ValueError. Return a 2-tuple so
+    the unpacking at the call site is at least shape-correct until the real
+    implementation (see dataPreparation_back) is wired in.
+    """
+    return "hoge", "hoge"
+
+def dataPreparation_back():
+    """
+    Build (train, test) TupleDatasets from the raw learning-data files.
+
+    Splits the generated data into halves: the first half for training, the
+    rest for testing.
+
+    NOTE(review): requires `from chainer import datasets` (not visible in this
+    hunk) plus the DataReader/DatasetGenerator imports above -- confirm.
+    """
+    # Prepare containers for the generated data.
+    train_data = []
+    train_label = []
+    # Generate the source data.
+    reader = DataReader()  # type DataReader
+    dgene = DatasetGenerator()  # type DataSetGenerator
+    dataList = reader.createLearningData()
+    train_data, train_label = dgene.generateDataset(dataList)
+    # Half of the file count becomes the split point; math.modf discards the
+    # fractional part (dust) for odd counts.
+    dust, batchsize = math.modf(len(dataList) / 2)
+    batchsize = int(batchsize)
+    train = datasets.tuple_dataset.TupleDataset(train_data[0:batchsize], train_label[0:batchsize])
+    test = datasets.tuple_dataset.TupleDataset(train_data[batchsize:], train_label[batchsize:])
+    return train,test
+# NOTE(review): consider guarding with `if __name__ == "__main__":` so that
+# importing this module does not trigger training.
main()
\ No newline at end of file
--- /dev/null
+from keras.datasets import mnist
+from keras.layers import Dense,Input,Dropout
+from keras.models import Model
+import keras
+
+
+# Load MNIST; x_* are image arrays, y_* are integer class labels.
+(x_train, y_train), (x_test, y_test) = mnist.load_data()
+
+
+# Debug output: inspect the nested structure of the raw arrays.
+print("type:"+str(type(x_train))+"ren"+str(len(x_train)))
+print("type:"+str(type(x_train[0]))+"ren:"+str(len(x_train[0])))
+print("type:"+str(type(x_train[0][0]))+"ren:"+str(len(x_train[0][0])))
+print("type:"+str(type(x_train[0][0][0])))
+
+# Flatten 28x28 images to 784-vectors and scale pixels to [0, 1].
+# Derive the sample counts from the data instead of hard-coding 60000/10000,
+# so the script survives a change of dataset split.
+x_train = x_train.reshape(len(x_train), 784).astype('float32') / 255
+x_test = x_test.reshape(len(x_test), 784).astype('float32') / 255
+
+# One-hot encode the labels (10 classes).
+# keras.utils.to_categorical is the supported API; the np_utils module path
+# is deprecated and removed in newer Keras releases.
+y_train = keras.utils.to_categorical(y_train.astype('int32'), 10)
+y_test = keras.utils.to_categorical(y_test.astype('int32'), 10)
+
+inputs = Input(shape=(784,))
+
+# Two 512-unit ReLU layers with 50% dropout, softmax over the 10 classes.
+nw = Dense(512, activation='relu')(inputs)
+nw = Dropout(.5)(nw)
+nw = Dense(512, activation='relu')(nw)
+nw = Dropout(.5)(nw)
+predictions = Dense(10, activation='softmax')(nw)
+
+# Define the model (specify inputs and the output layer).
+# NOTE(review): model definition/compile/fit are still commented out -- this
+# script currently only preprocesses data and builds the layer graph.
+#model = Model(inputs=inputs, outputs=predictions)
+
+#model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])
+
+#history = model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=1, validation_data=(x_test, y_test))