未來板MicroPython編程16:KOI¶
KOI 基本應用¶
初始化KOI¶
koi = KOI(tx='P2',rx='P12',id=1)
初始化KOI
- tx為模組藍色線,配合Robotbit Edu使用請填P2。
- rx為模組綠色線,配合Robotbit Edu使用請填P12。
人臉追蹤¶
人臉追蹤範例程式¶
# Face tracking demo: detect a face with the KOI camera and show its
# coordinates on the Future Board screen.
from future import *
from futureKOI import KOI

screen.sync = 0  # manual refresh mode: screen updates only on screen.refresh()
koi = KOI(tx='P2', rx='P12', id=1)
koi.screen_mode(2, cmd='K6')
koi.face_yolo_load(cmd='K30')  # load the face-detection model on the KOI

while True:
    if koi.face_detect(cmd='K31'):
        screen.fill((0, 0, 0))
        screen.text("Face detected:", 5, 10, 1, (255, 255, 255))
        screen.text("X:", 5, 20, 2, (255, 255, 255))
        # K31 result: index 0 is the face X coordinate
        screen.text((koi.get_re(cmd='K31')[0]), 5, 40, 3, (255, 255, 255))
        screen.text("Y:", 5, 70, 2, (255, 255, 255))
        # K31 result: index 1 is the face Y coordinate
        screen.text((koi.get_re(cmd='K31')[1]), 5, 90, 3, (255, 255, 255))
        screen.refresh()
    else:
        screen.fill((0, 0, 0))
        screen.text("No face", 5, 10, 1, (255, 255, 255))
        screen.refresh()
    sleep(0.5)  # poll twice a second
特徵分類器¶
KOI特徵分類器範例程式(模型訓練)¶
# KOI feature-classifier training demo.
#   A        -> add a sample for the current tag
#   B        -> advance to the next tag (with a beep)
#   A + B    -> save the trained model to model.json
from future import *
from futureKOI import KOI

items = ['rock', 'paper', 'scissors']  # tags to train, cycled by index i
i = 0

koi = KOI(tx='P2', rx='P12', id=1)
koi.init_cls()  # init classifier
koi.screen_mode(2, cmd='K6')
screen.sync = 0  # manual refresh mode

while True:
    screen.fill((0, 0, 0))
    if sensor.btnValue("a") and sensor.btnValue("b"):
        koi.cls_save_model(model="model.json", cmd='K43')  # saves the classifier model
        buzzer.melody(1)
    else:
        if sensor.btnValue("a"):
            sleep(0.2)  # debounce; give time to release for the A+B combo
            if not sensor.btnValue("b"):
                # i % 3 selects the current tag name
                koi.cls_add_tag(id=items[i % 3], cmd='K41')  # classifier add tag
                buzzer.melody(4)
        else:
            if sensor.btnValue("b"):
                sleep(0.2)
                buzzer.tone(440, 0.2)
                if not sensor.btnValue("a"):
                    i += 1  # move to the next tag
    screen.text("Now training:", 0, 10, 1, (255, 255, 255))
    screen.text(items[i % 3], 0, 30, 2, (255, 255, 255))
    screen.text("Press A to add tag", 0, 60, 1, (255, 255, 255))
    screen.text("Press B for next tag", 0, 80, 1, (255, 255, 255))
    screen.text("Press A+B to save", 0, 100, 1, (255, 255, 255))
    screen.refresh()
KOI特徵分類器範例程式(模型運行)¶
# KOI feature-classifier inference demo: press A to classify the current
# camera frame and display the recognized tag.
from future import *
from futureKOI import KOI

koi = KOI(tx='P2', rx='P12', id=1)
koi.screen_mode(2, cmd='K6')
koi.init_cls()
koi.cls_load_model(model="model.json", cmd='K44')  # loads the classifier model

while True:
    if sensor.btnValue("a"):
        screen.clear()
        # cls_run returns the tag recognized for the current frame
        screen.text((koi.cls_run(cmd='K42')), 5, 10, 2, (255, 255, 255))
        screen.refresh()
顏色追蹤¶
獲取色塊數值¶
koi.get_re(cmd="K15")[0] #cx
koi.get_re(cmd="K15")[1] #cy
koi.get_re(cmd="K15")[2] #w
koi.get_re(cmd="K15")[3] #h
獲取色塊數值。
顏色追蹤範例程式¶
# Color tracking demo.
#   A -> calibrate the color named "red" from the current frame
#   B -> track that color and display [cx, cy, w, h]
from future import *
from futureKOI import KOI

# NOTE: koi must be constructed BEFORE any koi.* call — the original
# listing called koi.screen_mode() first, which raises NameError.
koi = KOI(tx='P2', rx='P12', id=1)
koi.screen_mode(2, cmd='K6')

while True:
    if sensor.btnValue("a"):
        koi.color_cali(name="red", cmd='K16')  # calibrate color "red"
        sleep(0.3)
    if sensor.btnValue("b"):
        if koi.color_tracking(name="red", cmd='K15'):
            screen.clear()
            # K15 result: [cx, cy, w, h]
            screen.text((koi.get_re(cmd='K15')[0]), 5, 10, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K15')[1]), 5, 20, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K15')[2]), 5, 30, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K15')[3]), 5, 40, 1, (255, 255, 255))
            screen.refresh()
        sleep(0.3)
獲取巡線數值¶
koi.get_re(cmd="K12")[0] #x1
koi.get_re(cmd="K12")[1] #y1
koi.get_re(cmd="K12")[2] #x2
koi.get_re(cmd="K12")[3] #y2
獲取巡線數值。
追蹤巡線範例程式¶
# Line tracking demo.
#   A -> calibrate the line color named "red"
#   B -> track the line and display its endpoints [x1, y1, x2, y2]
from future import *
from futureKOI import KOI

# NOTE: koi must be constructed BEFORE any koi.* call — the original
# listing called koi.screen_mode() first, which raises NameError.
koi = KOI(tx='P2', rx='P12', id=1)
koi.screen_mode(2, cmd='K6')

while True:
    if sensor.btnValue("a"):
        koi.color_cali(name="red", cmd='K16')  # calibrate line color
        sleep(0.3)
    if sensor.btnValue("b"):
        if koi.line_tracking(name="red", cmd='K12'):
            screen.clear()
            # K12 result: [x1, y1, x2, y2]
            screen.text((koi.get_re(cmd='K12')[0]), 5, 10, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K12')[1]), 5, 20, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K12')[2]), 5, 30, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K12')[3]), 5, 40, 1, (255, 255, 255))
            screen.refresh()
        sleep(0.3)
幾何圖形識別¶
獲取圓形數值¶
koi.get_re(cmd="K10")[0] #cx
koi.get_re(cmd="K10")[1] #cy
koi.get_re(cmd="K10")[2] #r
獲取圓形數值。
獲取矩形數值¶
koi.get_re(cmd="K11")[0]  # presumably x/cx — not labeled here; confirm against KOI docs
koi.get_re(cmd="K11")[1]  # presumably y/cy — confirm
koi.get_re(cmd="K11")[2]  # presumably width — confirm
koi.get_re(cmd="K11")[3]  # presumably height — confirm
獲取矩形數值。
幾何圖形識別範例程式¶
# Geometric shape detection demo.
#   A -> detect a circle and display [cx, cy, r]
#   B -> detect a rectangle and display its four values
from futureKOI import KOI
from future import *

# NOTE: koi must be constructed BEFORE any koi.* call — the original
# listing called koi.screen_mode() first, which raises NameError.
koi = KOI(tx='P2', rx='P12', id=1)
koi.screen_mode(2, cmd='K6')

while True:
    if sensor.btnValue("a"):
        if koi.circle_detect(th=4000, cmd='K10'):
            screen.clear()
            # K10 result: [cx, cy, r] — the original listing displayed
            # index [0] three times; show all three values instead.
            screen.text((koi.get_re(cmd='K10')[0]), 5, 10, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K10')[1]), 5, 20, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K10')[2]), 5, 30, 1, (255, 255, 255))
            screen.refresh()
        sleep(0.3)
    if sensor.btnValue("b"):
        if koi.rectangle_detect(th=4000, cmd='K11'):
            screen.clear()
            screen.text((koi.get_re(cmd='K11')[0]), 5, 10, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K11')[1]), 5, 20, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K11')[2]), 5, 30, 1, (255, 255, 255))
            screen.text((koi.get_re(cmd='K11')[3]), 5, 40, 1, (255, 255, 255))
            screen.refresh()
        sleep(0.3)
條碼識別¶
AprilTag數值¶
koi.get_re(cmd="K23")[0] #id
koi.get_re(cmd="K23")[1] #cx
koi.get_re(cmd="K23")[2] #cy
koi.get_re(cmd="K23")[3] #w
koi.get_re(cmd="K23")[4] #h
獲取AprilTag數值。
條碼識別範例程式¶
# Barcode / QR-code scanning demo.
#   A -> scan a QR code and display the decoded text
#   B -> scan a barcode and display the decoded text
from future import *
from futureKOI import KOI

koi = KOI(tx='P2', rx='P12', id=1)
koi.screen_mode(2, cmd='K6')

while True:
    if sensor.btnValue("a"):
        screen.clear()
        screen.text((koi.scan_qrcode(cmd='K20')), 5, 10, 2, (255, 255, 255))
    if sensor.btnValue("b"):
        screen.clear()
        screen.text((koi.scan_barcode(cmd='K22')), 5, 10, 2, (255, 255, 255))
物聯網¶
百度AI¶
百度AI人臉辨識¶
koi.baiduAI_face_detect(cmd="K75")
# returns [face token, age, sex, mask, emotion]
運行百度AI人臉辨識並返回人臉數值。
添加人臉到組別¶
koi.baiduAI_face_add(face_token="token" ,groupName="group" ,faceName="name" ,cmd="K76")
添加人臉到組別。
- face_token: 人臉特徵碼
- groupName: 組別名稱
- faceName: 人臉名稱
在組別搜尋人臉¶
koi.baiduAI_face_search(face_token="token" ,groupName="group" ,cmd="K77")[0]
在組別搜尋人臉並返回人臉名稱。
- face_token: 人臉特徵碼
- groupName: 組別名稱
物聯網文字轉語音範例程式¶
# IoT text-to-speech demo (Baidu AI).
#   A -> speak "hello" via the cloud TTS service
#   B -> display the board's IP address
from future import *
from futureKOI import KOI

koi = KOI(tx='P2', rx='P12', id=1)
koi.screen_mode(2, cmd='K6')
# Connect to Wi-Fi first; cloud TTS needs a working connection.
koi.connect_wifi(router="apname", pwd="password", cmd='K50')

while True:
    if sensor.btnValue("a"):
        koi.baiduAI_tts(txt='"hello"', cmd='K78')
        sleep(0.2)
    if sensor.btnValue("b"):
        screen.clear()
        screen.text((koi.get_ip(cmd='K54')), 5, 10, 1, (255, 255, 255))
        screen.refresh()
        sleep(0.2)
語音辨識¶
語音辨識模型訓練範例程式¶
# Speech-recognition model training demo.
#   A        -> record a sample for the current tag
#   B        -> advance to the next tag (with a beep)
#   A + B    -> save the trained model to speech.json
from future import *
from futureKOI import KOI

items = ['rock', 'paper', 'scissors']  # spoken tags to train, cycled by i
i = 0

koi = KOI(tx='P2', rx='P12', id=1)
koi.audio_noisetap()  # sample background noise before training
koi.screen_mode(2, cmd='K6')
screen.sync = 0  # manual refresh mode

while True:
    screen.fill((0, 0, 0))
    if sensor.btnValue("a") and sensor.btnValue("b"):
        buzzer.melody(1)
        koi.speech_save_model("speech.json")
    else:
        if sensor.btnValue("a"):
            sleep(0.2)  # debounce; give time to release for the A+B combo
            if not sensor.btnValue("b"):
                # i % 3 selects the current tag name
                koi.speech_add_tag(items[i % 3])
        else:
            if sensor.btnValue("b"):
                sleep(0.2)
                buzzer.tone(440, 0.2)
                if not sensor.btnValue("a"):
                    i += 1  # move to the next tag
    screen.text("Now training:", 0, 10, 1, (255, 255, 255))
    screen.text(items[i % 3], 0, 30, 2, (255, 255, 255))
    screen.text("Press A to add tag", 0, 60, 1, (255, 255, 255))
    screen.text("Press B for next tag", 0, 80, 1, (255, 255, 255))
    screen.text("Press A+B to save", 0, 100, 1, (255, 255, 255))
    screen.refresh()
語音辨識模型運行範例程式¶
# Speech-recognition inference demo: press A to recognize speech and
# display the matched tag.
from future import *
from futureKOI import KOI

koi = KOI(tx='P2', rx='P12', id=1)
koi.audio_noisetap()  # sample background noise before recognizing
koi.speech_load_model("speech.json")

while True:
    if sensor.btnValue("a"):
        screen.clear()
        # speech_run returns the recognized tag name
        screen.text((koi.speech_run(cmd='K65')), 5, 10, 2, (255, 255, 255))
        screen.refresh()