
I am capturing images from a Pi 3 using the standard PiCamera Python bindings.

I then send each image to another Pi 3 over a WiFi mesh network created with batman-adv.

The code I found for the socket part first sends the image length and then the actual byte stream.
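
In isolation, that framing looks roughly like the sketch below: a 4-byte little-endian length header followed by the payload (the payload bytes here are just a placeholder).

import struct

payload = b'\xff\xd8...\xff\xd9'  # placeholder for one JPEG frame

# Sender side: pack the payload length as a 4-byte little-endian unsigned int
header = struct.pack('<L', len(payload))
wire = header + payload

# Receiver side: read the header first, then exactly that many bytes
(length,) = struct.unpack('<L', wire[:4])
image_data = wire[4:4 + length]
assert image_data == payload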

This results in exactly 20 images in 10 seconds, i.e. 2 frames per second, which is a terrible frame rate.

I am confident that if I could skip this "handshake" I could increase the capture rate, but how do I save/process the images without knowing the content length?

I tried halving the image resolution, but that did not bring any improvement.

I also printed the image lengths from one run and then, on the next run, statically read the maximum of those values in bytes, but that produced unviewable images; presumably because each read spills past one image, so every file ends up containing more or less than one actual image.
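
To illustrate why a fixed maximum read size cannot recover the frame boundaries, here is a small standalone sketch (the frame sizes are made up):

import io

# Two back-to-back "frames" of different sizes, as they would arrive on the
# socket when no length header is sent (sizes are hypothetical)
frames = [b'A' * 90, b'B' * 110]
stream = io.BytesIO(b''.join(frames))

MAX_LEN = 100                     # fixed read size taken from an earlier run
first = stream.read(MAX_LEN)      # 90 bytes of frame 1 plus 10 bytes of frame 2
second = stream.read(MAX_LEN)     # the remaining 100 bytes of frame 2
print(len(first), len(second))    # 100 100 -- neither chunk is a valid frame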

The following code is client.py:

import io
import socket
import struct
import time
import picamera

client_socket = socket.socket()
client_socket.connect(('192.168.123.3', 6666))
connection = client_socket.makefile('wb')

try:
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        # Start a preview and let the camera warm up for 2 seconds
        camera.start_preview()
        time.sleep(2)

        # Note the start time and construct a stream to hold image data
        # temporarily (we could write it directly to connection but in this
        # case we want to find out the size of each capture first to keep
        # our protocol simple)
        start = time.time()
        stream = io.BytesIO()

        for foo in camera.capture_continuous(stream, 'jpeg'):
            # Write the length of the capture to the stream and flush to
            # ensure it actually gets sent
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()

            # Rewind the stream and send the image data over the wire
            stream.seek(0)
            connection.write(stream.read())

            # If we've been capturing for more than 10 seconds, quit
            if time.time() - start > 10:
                break

            # Reset the stream for the next capture
            stream.seek(0)
            stream.truncate()

    # Write a length of zero to the stream to signal we're done
    connection.write(struct.pack('<L', 0))
finally:
    connection.close()
    client_socket.close()

The code for server.py:

import io
import socket
import struct
from PIL import Image

server_socket = socket.socket()
server_socket.bind(('192.168.123.3', 6666))
server_socket.listen(0)
imagecounter = 1

connection = server_socket.accept()[0].makefile('rb')

try:
    while True:
        # Read the length of the image as a 32-bit unsigned int. If the
        # length is zero, quit the loop
        image_len = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]

        if not image_len:
            break

        # Construct a stream to hold the image data and read the image
        # data from the connection
        image_stream = io.BytesIO()
        image_stream.write(connection.read(image_len))

        # Rewind the stream, save it as a file
        image_stream.seek(0)
        with open('image%s.jpg' % imagecounter, 'wb') as img:
            img.write(image_stream.read())

        imagecounter += 1

finally:
    connection.close()
    server_socket.close()

The mesh/ad-hoc network is a hard requirement; the reasons I want to use it are outside the scope of this question.

I would like to know:

  1. How can I cut out the image-length handshake and still save/process the images correctly?
  2. Is there a better way to speed up this transfer?

My goal is 10 to 15 images per second, or more if possible.

1 Answer

I suggest you compress the data before sending it. You can do this with the zlib module. For example:

import io
import socket
import struct
import time
import picamera
import zlib # need this for compression

client_socket = socket.socket()
client_socket.connect(('192.168.123.3', 6666))
connection = client_socket.makefile('wb')

try:
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        # Start a preview and let the camera warm up for 2 seconds
        camera.start_preview()
        time.sleep(2)

        # Note the start time and construct a stream to hold image data
        # temporarily (we could write it directly to connection but in this
        # case we want to find out the size of each capture first to keep
        # our protocol simple)
        start = time.time()
        stream = io.BytesIO()

        for foo in camera.capture_continuous(stream, 'jpeg'):
            # Rewind the stream and compress the captured JPEG data
            stream.seek(0)
            data = zlib.compress(stream.read())

            # Write the length of the compressed data and flush to
            # ensure it actually gets sent
            connection.write(struct.pack('<L', len(data)))
            connection.flush()

            # Send the compressed image data over the wire
            connection.write(data)

            # If we've been capturing for more than 10 seconds, quit
            if time.time() - start > 10:
                break

            # Reset the stream for the next capture
            stream.seek(0)
            stream.truncate()

    # Write a length of zero to the stream to signal we're done
    connection.write(struct.pack('<L', 0))
finally:
    connection.close()
    client_socket.close()

You also have to decompress on the other side:

import io
import socket
import struct
from PIL import Image
import zlib

server_socket = socket.socket()
server_socket.bind(('192.168.123.3', 6666))
server_socket.listen(0)
imagecounter = 1

connection = server_socket.accept()[0].makefile('rb')

try:
    while True:
        # Read the length of the image as a 32-bit unsigned int. If the
        # length is zero, quit the loop
        image_len = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]

        if not image_len:
            break

        # Construct a stream to hold the image data and read the image
        # data from the connection
        image_stream = io.BytesIO()
        image_stream.write(connection.read(image_len))

        # Rewind the stream, save it as a file
        image_stream.seek(0)
        with open('image%s.jpg' % imagecounter, 'wb') as img:
            img.write(zlib.decompress(image_stream.read()))

        imagecounter += 1

finally:
    connection.close()
    server_socket.close()
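
If it helps to sanity-check the zlib calls in isolation, here is a minimal round-trip sketch (standalone, no camera or network; the payload bytes are made up):

import zlib

payload = b'\xff\xd8' + b'fake jpeg body ' * 1000 + b'\xff\xd9'  # placeholder data
compressed = zlib.compress(payload)

# The receiver gets back exactly the bytes that were compressed
assert zlib.decompress(compressed) == payload
print(len(payload), '->', len(compressed))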

Hope this works!

Answered 2019-08-24T20:08:54.190