OSDN Git Service

foo -> verified that Keras works; made rough, unsystematic fixes to the other files
author: unknown <shupeluter@hotmail.com>
Sat, 9 Jun 2018 15:07:47 +0000 (00:07 +0900)
committer: unknown <shupeluter@hotmail.com>
Sat, 9 Jun 2018 15:07:47 +0000 (00:07 +0900)
src/main/Python/DataGenerator.py [new file with mode: 0644]
src/main/Python/DatasetGenerator.py
src/main/Python/Lern.py
src/main/Python/__pycache__/Data.cpython-35.pyc [new file with mode: 0644]
src/main/Python/__pycache__/Data.cpython-36.pyc [new file with mode: 0644]
src/main/Python/__pycache__/DataReader.cpython-35.pyc [new file with mode: 0644]
src/main/Python/__pycache__/DataReader.cpython-36.pyc [new file with mode: 0644]
src/main/Python/__pycache__/DatasetGenerator.cpython-35.pyc [new file with mode: 0644]
src/main/Python/foo.py [new file with mode: 0644]

diff --git a/src/main/Python/DataGenerator.py b/src/main/Python/DataGenerator.py
new file mode 100644 (file)
index 0000000..3e1ae92
--- /dev/null
@@ -0,0 +1,22 @@
+"""
+Module that converts data read from training-data files into the training-data format.
+
+Training data x is expected to be array[total data files][day][price data];
+training data y is array[total data files][correct label].
+
+The module generates data so that it matches these shapes.
+
+"""
+
+def __generateSingleData(hoge):
+    """
+    Generate one row of training data from a single data object.
+    The data this function produces corresponds to one file's worth of data.
+    """
+    return 1, 0
+
+def generateData(hoge):
+    """
+    Generate combined data from a list of Data objects.
+    """
+
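Both functions above are still stubs. A minimal sketch of what generateData could grow into, assuming one Data object per source file and reusing the module's own __generateSingleData helper (the shapes follow the docstring; nothing here is committed code):

def generateData(dataList):
    x = []  # array[total data files][day][price data]
    y = []  # array[total data files][correct label]
    for data in dataList:
        row, label = __generateSingleData(data)  # one file's worth of data
        x.append(row)
        y.append(label)
    return x, y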
diff --git a/src/main/Python/DatasetGenerator.py b/src/main/Python/DatasetGenerator.py
index 663b802..18e6c35 100644 (file)
@@ -5,14 +5,14 @@ class DatasetGenerator:
 
     def generateDataset(self,dataList):
         graphdata = [] #type: List[float]
-        labeldata = [] #type: List[float]
+        labeldata = [] #type: List[int]
 
         data ="" #type: Data
         for data in dataList:
             temgraphdata = self.createGraphData(data.getData());
             #TODO: the hard-coded data size (magic number) is risky.
-            if(data.getLable() != '' and len(temgraphdata)==10):
-                labeldata.append(data.getLable())
+            if(data.getLable() != '' and len(temgraphdata)==500):
+                labeldata.append(int(data.getLable()))
                 graphdata.append(temgraphdata)
 
         return graphdata,labeldata
@@ -27,6 +27,6 @@ class DatasetGenerator:
             for block in strline:
                 floatLine.append(float(block))
             #TODO: add the floatLine conversion step (data normalization) here
-            result.append(floatLine)
+            result.extend(floatLine)
 
         return result
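The append -> extend change is what flattens each day's row of floats into a single feature vector, so createGraphData now returns one flat list whose length the len(temgraphdata)==500 check can test directly. A toy illustration (the numbers are made up):

rows = [[1.0, 2.0], [3.0, 4.0]]
nested = []
flat = []
for row in rows:
    nested.append(row)  # keeps nesting: [[1.0, 2.0], [3.0, 4.0]]
    flat.extend(row)    # flattens:      [1.0, 2.0, 3.0, 4.0]
print(len(nested), len(flat))  # -> 2 4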
diff --git a/src/main/Python/Lern.py b/src/main/Python/Lern.py
index 57754af..f1123d7 100644 (file)
@@ -3,6 +3,8 @@ from chainer import datasets, iterators, optimizers, serializers
 from chainer import Link, Chain, ChainList
 import chainer.functions as F
 import chainer.links as L
+from chainer import training
+from chainer.training import extensions
 import math
 from DataReader import DataReader
 from DatasetGenerator import DatasetGenerator
@@ -11,15 +12,20 @@ from Data import Data
 
 class MyChain(Chain):
     def __init__(self):
-        super(Chain,self).__init__(
-            l1=L.Linear(50,30),
-            l2=L.Linear(30,9)
+        super(MyChain,self).__init__(
+            l1=L.Linear(500,100),
+            l2=L.Linear(100,100),
+            l3=L.Linear(100,10),
         )
 
-    def __call__(self,x):
-        h = F.sigmoid(self.l1(x))
-        o = self.l2(h)
-        return o
+    def __call__(self,x,t):
+        return F.softmax_cross_entropy(self.fwd(x),t)
+
+    def fwd(self,x):
+        h1=F.relu(self.l1(x))
+        h2=F.relu(self.l2(h1))
+        return self.l3(h2)
+
 
 class MyClassifer(Chain):
     def __init__(self,predictor):
@@ -36,23 +42,37 @@ class MyClassifer(Chain):
 def main():
 #    try:
         # Prepare the model
-        model = L.Classifier(MyChain)
+        model = MyChain()
 
         # Prepare the optimizer
-        optimizer = optimizers.Adam
-        optimizer(model)
-
-        #データ用意
-        train_data = []
-        train_label= []
-        #元データ生成
-        reader = DataReader() #type DataReader
-        dgene  = DatasetGenerator()#type DataSetGenerator
-
-        dataList = []
-        dataList = reader.createLearningData()
-        train_data,train_label = dgene.generateDataset(dataList)
-        dust,batchsize=math.modf(len(dataList)/2)
-        batchsize = int(batchsize)
+        optimizer = optimizers.Adam()
+        optimizer.setup(model)
+
+        # Prepare the data
+        train,test = dataPreparation()
+
+        updater = training.StandardUpdater(iterators.SerialIterator(train, 32), optimizer)  # needs an iterator; batch size 32 is a placeholder
+        trainer = training.Trainer(updater,(10,'epoch'))
+        trainer.extend(extensions.ProgressBar())
+        trainer.run()
+
+def dataPreparation():
+    return "hoge"
+
+def dataPreparation_back():
+    # Prepare the data
+    train_data = []
+    train_label = []
+    # Generate the source data
+    reader = DataReader()  # type: DataReader
+    dgene = DatasetGenerator()  # type: DatasetGenerator
+    dataList = []
+    dataList = reader.createLearningData()
+    train_data, train_label = dgene.generateDataset(dataList)
+    dust, batchsize = math.modf(len(dataList) / 2)
+    batchsize = int(batchsize)
+    train = datasets.tuple_dataset.TupleDataset(train_data[0:batchsize], train_label[0:batchsize])
+    test = datasets.tuple_dataset.TupleDataset(train_data[batchsize:], train_label[batchsize:])
+    return train,test
 
 main()
\ No newline at end of file
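For reference, a minimal sketch of the training loop this main() is heading toward once dataPreparation() returns the TupleDatasets that dataPreparation_back() builds (the batch size, epoch count, and the function name train_model are placeholders, not committed code):

from chainer import iterators, optimizers, training
from chainer.training import extensions

def train_model(model, train, test, batchsize=32):
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    # SerialIterator wraps the datasets; StandardUpdater requires an iterator
    train_iter = iterators.SerialIterator(train, batchsize)
    test_iter = iterators.SerialIterator(test, batchsize, repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (10, 'epoch'))
    trainer.extend(extensions.Evaluator(test_iter, model))  # MyChain.__call__(x, t) returns the loss
    trainer.extend(extensions.ProgressBar())
    trainer.run()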
diff --git a/src/main/Python/__pycache__/Data.cpython-35.pyc b/src/main/Python/__pycache__/Data.cpython-35.pyc
new file mode 100644 (file)
index 0000000..373e461
Binary files /dev/null and b/src/main/Python/__pycache__/Data.cpython-35.pyc differ
diff --git a/src/main/Python/__pycache__/Data.cpython-36.pyc b/src/main/Python/__pycache__/Data.cpython-36.pyc
new file mode 100644 (file)
index 0000000..813ff91
Binary files /dev/null and b/src/main/Python/__pycache__/Data.cpython-36.pyc differ
diff --git a/src/main/Python/__pycache__/DataReader.cpython-35.pyc b/src/main/Python/__pycache__/DataReader.cpython-35.pyc
new file mode 100644 (file)
index 0000000..f8d69d4
Binary files /dev/null and b/src/main/Python/__pycache__/DataReader.cpython-35.pyc differ
diff --git a/src/main/Python/__pycache__/DataReader.cpython-36.pyc b/src/main/Python/__pycache__/DataReader.cpython-36.pyc
new file mode 100644 (file)
index 0000000..a3dee3d
Binary files /dev/null and b/src/main/Python/__pycache__/DataReader.cpython-36.pyc differ
diff --git a/src/main/Python/__pycache__/DatasetGenerator.cpython-35.pyc b/src/main/Python/__pycache__/DatasetGenerator.cpython-35.pyc
new file mode 100644 (file)
index 0000000..73611a8
Binary files /dev/null and b/src/main/Python/__pycache__/DatasetGenerator.cpython-35.pyc differ
diff --git a/src/main/Python/foo.py b/src/main/Python/foo.py
new file mode 100644 (file)
index 0000000..d48abb6
--- /dev/null
@@ -0,0 +1,33 @@
+from keras.datasets import mnist
+from keras.layers import Dense,Input,Dropout
+from keras.models import Model
+import keras
+
+
+(x_train, y_train), (x_test, y_test) = mnist.load_data()
+
+
+print("type:"+str(type(x_train))+"ren"+str(len(x_train)))
+print("type:"+str(type(x_train[0]))+"ren:"+str(len(x_train[0])))
+print("type:"+str(type(x_train[0][0]))+"ren:"+str(len(x_train[0][0])))
+print("type:"+str(type(x_train[0][0][0])))
+x_train = x_train.reshape(60000, 784).astype('float32') /255
+x_test = x_test.reshape(10000, 784).astype('float32') /255
+
+y_train = keras.utils.np_utils.to_categorical(y_train.astype('int32'),10)
+y_test = keras.utils.np_utils.to_categorical(y_test.astype('int32'),10)
+
+inputs = Input(shape=(784,))
+
+nw = Dense(512, activation='relu')(inputs)
+nw = Dropout(.5)(nw)
+nw = Dense(512, activation='relu')(nw)
+nw = Dropout(.5)(nw)
+predictions = Dense(10, activation='softmax')(nw)
+
+# Define the model (specify the inputs and the layers)
+#model = Model(inputs=inputs, outputs=predictions)
+
+#model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])
+
+#history = model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=1, validation_data=(x_test, y_test))
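The model definition, compile, and fit calls are left commented out in foo.py; uncommenting them as written gives the runnable Keras check the commit message mentions. The evaluate() call at the end is an addition for illustration, not part of the commit:

model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)  # not in foo.py; added for illustration
print('test loss:', score[0], 'test accuracy:', score[1])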